HAMMER - Implement experimental volume removal
diff --git a/sys/vfs/hammer/hammer_blockmap.c b/sys/vfs/hammer/hammer_blockmap.c
index 6d117d6..bff567f 100644
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  * 
- * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.24 2008/07/14 03:20:49 dillon Exp $
+ * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.27 2008/07/31 22:30:33 dillon Exp $
  */
 
 /*
  * HAMMER blockmap
  */
 #include "hammer.h"
 
 static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
-static int hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv,
-                        hammer_off_t zone2_offset);
-
+static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
+                                   hammer_off_t base_offset, int zone,
+                                   struct hammer_blockmap_layer2 *layer2);
+static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
 
 /*
  * Reserved big-blocks red-black tree support
@@ -64,8 +65,8 @@ hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
  * Allocate bytes from a zone
  */
 hammer_off_t
-hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
-                     int bytes, int *errorp)
+hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
+                     hammer_off_t hint, int *errorp)
 {
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
@@ -85,6 +86,7 @@ hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
        hammer_off_t base_off;
        int loops = 0;
        int offset;             /* offset within big-block */
+       int use_hint;
 
        hmp = trans->hmp;
 
@@ -107,8 +109,26 @@ hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
 
-       next_offset = blockmap->next_offset;
+       /*
+        * Use the hint if we have one.
+        */
+       if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
+               next_offset = (hint + 15) & ~(hammer_off_t)15;
+               use_hint = 1;
+       } else {
+               next_offset = blockmap->next_offset;
+               use_hint = 0;
+       }
 again:
+
+       /*
+        * use_hint is turned off if we leave the hinted big-block.
+        */
+       if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
+               next_offset = blockmap->next_offset;
+               use_hint = 0;
+       }
+
        /*
         * Check for wrap
         */
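
The hint path added above rounds the caller's hint up to the 16-byte
allocation granularity and keeps honoring it only while the iterator stays
inside the hinted window.  A minimal stand-alone sketch of both tests, with
illustrative sizes standing in for HAMMER_HINTBLOCK_MASK64 and friends from
the HAMMER headers:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hammer_off_t;

/* Illustrative values; the real constants live in the HAMMER headers. */
#define LARGEBLOCK_SIZE		(8192ULL * 1024)	  /* assumed 8MB big-block */
#define HINTBLOCK_MASK64	(LARGEBLOCK_SIZE * 4 - 1) /* assumed hint window */

int
main(void)
{
	hammer_off_t hint = 0x0000000123456789ULL;

	/* Round the hint up to the 16-byte allocation granularity. */
	hammer_off_t next_offset = (hint + 15) & ~(hammer_off_t)15;

	/*
	 * The hint stays in effect only while next_offset remains in the
	 * hinted window: XOR exposes the differing bits, and masking off
	 * the in-window bits leaves zero iff both offsets share a window.
	 */
	int use_hint = ((next_offset ^ hint) & ~HINTBLOCK_MASK64) == 0;

	printf("next_offset=%016jx use_hint=%d\n",
	       (uintmax_t)next_offset, use_hint);
	return 0;
}
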
@@ -144,14 +164,31 @@ again:
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
+
+       /*
+        * Skip the big-block if it belongs to a volume that we are
+        * currently trying to remove from the file-system.
+        */
+       if ((int)HAMMER_VOL_DECODE(layer1_offset) == hmp->volume_to_remove) {
+               next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
+                             ~HAMMER_BLOCKMAP_LAYER2_MASK;
+               goto again;
+       }
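
This skip is the core of the experimental volume removal: the allocator
refuses to place new data on the volume being drained by jumping the cursor
past the entire layer2 range.  A sketch of the decode-and-skip arithmetic;
the bit positions and range size are assumptions modeled on hammer_disk.h:

#include <stdint.h>

typedef uint64_t hammer_off_t;

/*
 * HAMMER packs a 4-bit zone and an 8-bit volume number into the top of
 * each 64-bit offset; the shift and mask here are assumptions modeled
 * on HAMMER_VOL_DECODE() in hammer_disk.h.
 */
#define VOL_DECODE(ofs)		((int)(((ofs) >> 52) & 255))

/* Assumed span covered by one layer1 entry (illustrative size). */
#define BLOCKMAP_LAYER2		(1ULL << 42)
#define BLOCKMAP_LAYER2_MASK	(BLOCKMAP_LAYER2 - 1)

/*
 * Advance the allocation cursor past the current layer2 range when it
 * lives on the volume being removed, mirroring the check above.
 */
hammer_off_t
skip_removed_volume(hammer_off_t next_offset, hammer_off_t layer1_offset,
		    int volume_to_remove)
{
	if (VOL_DECODE(layer1_offset) == volume_to_remove) {
		next_offset = (next_offset + BLOCKMAP_LAYER2) &
			      ~BLOCKMAP_LAYER2_MASK;
	}
	return next_offset;
}
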
+
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
-       KKASSERT(*errorp == 0);
+       if (*errorp) {
+               result_offset = 0;
+               goto failed;
+       }
 
        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER1");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
+                       panic("CRC FAILED: LAYER1");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        /*
@@ -172,13 +209,20 @@ again:
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
-       KKASSERT(*errorp == 0);
+       if (*errorp) {
+               result_offset = 0;
+               goto failed;
+       }
 
        /*
-        * Check CRC.
+        * Check CRC.  This can race another thread that holds the
+        * blockmap lock and is in the middle of modifying layer2.
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER2");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
+                       panic("CRC FAILED: LAYER2");
+               hammer_unlock(&hmp->blkmap_lock);
        }
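
The old Debugger() calls treated any CRC mismatch as fatal, but an unlocked
reader can observe a layer entry while a writer holding blkmap_lock is in
the middle of updating it.  The replacement is a double-check pattern: only
a mismatch that survives re-reading under the exclusive lock is treated as
real corruption.  A generic userland sketch of the same idea, using a
pthread mutex in place of the kernel lock:

#include <pthread.h>
#include <stdint.h>

/* Hypothetical stand-in for a blockmap layer entry. */
struct entry {
	uint32_t payload;
	uint32_t crc;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t
compute_crc(const struct entry *e)
{
	return e->payload ^ 0xdeadbeef;		/* toy checksum */
}

/*
 * Returns 0 if the entry is intact.  A mismatch seen without the lock
 * may just be a racing writer, so only the locked re-check is fatal
 * (HAMMER panics at that point).
 */
int
check_entry(struct entry *e)
{
	if (e->crc != compute_crc(e)) {
		pthread_mutex_lock(&map_lock);
		int bad = (e->crc != compute_crc(e));
		pthread_mutex_unlock(&map_lock);
		if (bad)
			return -1;
	}
	return 0;
}

The unlocked first check keeps the common, CRC-good path free of lock
traffic; only suspect entries pay for the exclusive lock.
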
 
        /*
@@ -193,6 +237,26 @@ again:
                goto again;
        }
 
+       /*
+        * If operating in the current non-hint blockmap block, do not
+        * allow it to get over-full.  Also drop any active hinting so
+        * blockmap->next_offset is updated at the end.
+        *
+        * We do this for B-Tree and meta-data allocations to provide
+        * localization for updates.
+        */
+       if ((zone == HAMMER_ZONE_BTREE_INDEX ||
+            zone == HAMMER_ZONE_META_INDEX) &&
+           offset >= HAMMER_LARGEBLOCK_OVERFILL &&
+           !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
+       ) {
+               if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
+                       next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
+                       use_hint = 0;
+                       goto again;
+               }
+       }
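
For the B-Tree and meta-data zones the allocator deliberately stops packing
a big-block once the cursor is past the overfill watermark, trading a
little space for locality of future updates.  A small sketch of the skip
arithmetic, with an assumed 8MB big-block and an illustrative watermark in
place of HAMMER_LARGEBLOCK_OVERFILL:

#include <stdint.h>

#define LARGEBLOCK_SIZE		(8192u * 1024)		/* assumed 8MB */
#define LARGEBLOCK_MASK64	((uint64_t)LARGEBLOCK_SIZE - 1)
#define LARGEBLOCK_OVERFILL	(LARGEBLOCK_SIZE / 2)	/* illustrative */

/*
 * Skip to the next big-block boundary when the current big-block is
 * already past the overfill watermark; otherwise leave the cursor alone.
 */
uint64_t
maybe_skip_overfull(uint64_t next_offset)
{
	uint32_t offset = (uint32_t)(next_offset & LARGEBLOCK_MASK64);

	if (offset >= LARGEBLOCK_OVERFILL)
		next_offset += LARGEBLOCK_SIZE - offset;  /* align up */
	return next_offset;
}
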
+
        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
@@ -230,6 +294,7 @@ again:
                        next_offset += resv->append_off - offset;
                        goto again;
                }
+               ++resv->refs;
        }
 
        /*
@@ -271,9 +336,17 @@ again:
        hammer_modify_buffer_done(buffer2);
        KKASSERT(layer2->bytes_free >= 0);
 
+       /*
+        * We hold the blockmap lock and should be the only ones
+        * capable of modifying resv->append_off.  Track the allocation
+        * as appropriate.
+        */
+       KKASSERT(bytes != 0);
        if (resv) {
                KKASSERT(resv->append_off <= offset);
                resv->append_off = offset + bytes;
+               resv->flags &= ~HAMMER_RESF_LAYER2FREE;
+               hammer_blockmap_reserve_complete(hmp, resv);
        }
 
        /*
@@ -287,11 +360,15 @@ again:
        result_offset = next_offset;
 
        /*
-        * Process allocated result_offset
+        * If we weren't supplied with a hint, or could not use it, then
+        * we wound up using blockmap->next_offset as the starting point
+        * and need to save the updated value.
         */
-       hammer_modify_volume(NULL, root_volume, NULL, 0);
-       blockmap->next_offset = next_offset + bytes;
-       hammer_modify_volume_done(root_volume);
+       if (use_hint == 0) {
+               hammer_modify_volume(NULL, root_volume, NULL, 0);
+               blockmap->next_offset = next_offset + bytes;
+               hammer_modify_volume_done(root_volume);
+       }
        hammer_unlock(&hmp->blkmap_lock);
 failed:
 
@@ -398,13 +475,17 @@ again:
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
-       KKASSERT(*errorp == 0);
+       if (*errorp)
+               goto failed;
 
        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER1");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
+                       panic("CRC FAILED: LAYER1");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        /*
@@ -426,14 +507,18 @@ again:
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
-       KKASSERT(*errorp == 0);
+       if (*errorp)
+               goto failed;
 
        /*
         * Check CRC if not allocating into uninitialized space (which we
         * aren't when reserving space).
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER2");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
+                       panic("CRC FAILED: LAYER2");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        /*
@@ -488,11 +573,13 @@ again:
                ++resv->refs;
                resx = NULL;
        } else {
-               resx = kmalloc(sizeof(*resv), M_HAMMER,
+               resx = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resx->refs = 1;
                resx->zone = zone;
                resx->zone_offset = base_off;
+               if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
+                       resx->flags |= HAMMER_RESF_LAYER2FREE;
                resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
                KKASSERT(resv == NULL);
                resv = resx;
@@ -533,105 +620,112 @@ failed:
 }
 
 /*
- * Backend function - undo a portion of a reservation.
- */
-void
-hammer_blockmap_reserve_undo(hammer_reserve_t resv,
-                        hammer_off_t zone_offset, int bytes)
-{
-       resv->bytes_freed += bytes;
-}
-
-
-/*
- * A record with a storage reservation calls this function when it is
- * being freed.  The storage may or may not have actually been allocated.
- *
- * This function removes the lock that prevented other entities from
- * allocating out of the storage or removing the zone assignment.
+ * Dereference a reservation structure.  Upon the final release, the
+ * underlying big-block is checked, and if it is entirely free we delete
+ * any related HAMMER buffers to avoid potential conflicts with future
+ * reuse of the big-block.
  */
 void
 hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
 {
-       hammer_off_t zone2_offset;
+       hammer_off_t base_offset;
+       int error;
 
        KKASSERT(resv->refs > 0);
+       KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
+                HAMMER_ZONE_RAW_BUFFER);
+
+       /*
+        * Setting append_off to the max prevents any new allocations
+        * from occurring while we are trying to dispose of the reservation,
+        * allowing us to safely delete any related HAMMER buffers.
+        *
+        * If we are unable to clean out all related HAMMER buffers we
+        * requeue the delay.
+        */
+       if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
+               resv->append_off = HAMMER_LARGEBLOCK_SIZE;
+               base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
+               base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
+               error = hammer_del_buffers(hmp, base_offset,
+                                          resv->zone_offset,
+                                          HAMMER_LARGEBLOCK_SIZE,
+                                          0);
+               if (error)
+                       hammer_reserve_setdelay(hmp, resv);
+       }
        if (--resv->refs == 0) {
                KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
-
-               zone2_offset = (resv->zone_offset & ~HAMMER_OFF_ZONE_MASK) |
-                               HAMMER_ZONE_RAW_BUFFER;
-
-               /*
-                * If we are releasing a zone and all of its reservations
-                * were undone we have to clean out all hammer and device
-                * buffers associated with the big block.
-                *
-                * Any direct allocations will cause this test to fail
-                * (bytes_freed will never reach append_off), which is
-                * the behavior we desire.  Once the zone has been assigned
-                * to the big-block the only way to allocate from it in the
-                * future is if the reblocker can completely clean it out,
-                * and that will also properly call hammer_del_buffers().
-                *
-                * If we don't we risk all sorts of buffer cache aliasing
-                * effects, including overlapping buffers with different
-                * sizes.
-                */
-               if (resv->bytes_freed == resv->append_off) {
-                       kprintf("U");
-                       hammer_del_buffers(hmp, resv->zone_offset,
-                                          zone2_offset,
-                                          HAMMER_LARGEBLOCK_SIZE);
-               }
                RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
-               kfree(resv, M_HAMMER);
+               kfree(resv, hmp->m_misc);
                --hammer_count_reservations;
        }
 }
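
reserve_complete() now carries the buffer-invalidation duty that previously
hinged on the removed bytes_freed bookkeeping: on the final reference of a
reservation whose big-block went entirely free, it closes the reservation
to new allocations and tries to evict the related buffers, re-arming the
delay if eviction fails.  A reduced userland sketch of that last-reference
pattern, with toy stand-ins for the HAMMER calls:

#include <stdio.h>
#include <stdlib.h>

#define LARGEBLOCK_SIZE	(8192u * 1024)	/* assumed 8MB big-block */

struct resv {
	int refs;
	int layer2_free;	/* set when the big-block went fully free */
	unsigned append_off;
};

/* Toy stand-in for hammer_del_buffers(); pretend eviction succeeded. */
static int
del_buffers(struct resv *resv)
{
	(void)resv;
	return 0;
}

/* Toy stand-in for hammer_reserve_setdelay(). */
static void
requeue_delay(struct resv *resv)
{
	printf("requeued resv %p for another flush cycle\n", (void *)resv);
}

static void
reserve_complete(struct resv *resv)
{
	/*
	 * On the final reference of a fully free big-block, close the
	 * reservation to new allocations (append_off = max) before
	 * evicting buffers; requeue the delay if eviction fails.
	 */
	if (resv->refs == 1 && resv->layer2_free) {
		resv->append_off = LARGEBLOCK_SIZE;
		if (del_buffers(resv) != 0)
			requeue_delay(resv);
	}
	if (--resv->refs == 0)
		free(resv);
}

int
main(void)
{
	struct resv *r = calloc(1, sizeof(*r));
	r->refs = 1;
	r->layer2_free = 1;
	reserve_complete(r);	/* final ref: evict and free */
	return 0;
}
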
 
 /*
- * This ensures that no data reallocations will take place at the specified
- * zone2_offset (pointing to the base of a bigblock) for 2 flush cycles,
- * preventing deleted data space, which has no UNDO, from being reallocated 
- * too quickly.
+ * Prevent a potentially free big-block from being reused until after
+ * the related flushes have completely cycled; otherwise crash recovery
+ * could resurrect a data block that was already reused and overwritten.
+ *
+ * The caller might reset the underlying layer2 entry's append_off to 0, so
+ * our covering append_off must be set to max to prevent any reallocation
+ * until the flush delays complete and any underlying cached buffers
+ * have been properly invalidated.
  */
-static int
-hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv,
-                       hammer_off_t zone2_offset)
+static void
+hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
+                       int zone, struct hammer_blockmap_layer2 *layer2)
 {
-       int error;
+       hammer_reserve_t resv;
 
+       /*
+        * Allocate the reservation if necessary.
+        *
+        * NOTE: A lock will eventually be needed around the resv
+        * lookup/allocation and the setdelay call; currently refs is
+        * not bumped until the call.
+        */
+again:
+       resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
        if (resv == NULL) {
-               resv = kmalloc(sizeof(*resv), M_HAMMER,
+               resv = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
-               resv->refs = 1; /* ref for on-delay list */
-               resv->zone_offset = zone2_offset;
+               resv->zone = zone;
+               resv->zone_offset = base_offset;
+               resv->refs = 0;
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;
+
+               if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
+                       resv->flags |= HAMMER_RESF_LAYER2FREE;
                if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
-                       error = EAGAIN;
-                       kfree(resv, M_HAMMER);
-               } else {
-                       error = 0;
-                       ++hammer_count_reservations;
+                       kfree(resv, hmp->m_misc);
+                       goto again;
                }
-       } else if (resv->flags & HAMMER_RESF_ONDELAY) {
-               --hmp->rsv_fromdelay;
-               resv->flags &= ~HAMMER_RESF_ONDELAY;
+               ++hammer_count_reservations;
+       } else {
+               if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
+                       resv->flags |= HAMMER_RESF_LAYER2FREE;
+       }
+       hammer_reserve_setdelay(hmp, resv);
+}
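
The lookup/allocate loop above retries through the again: label because the
reservation is allocated with M_WAITOK: the thread can sleep in kmalloc()
and lose a race, in which case RB_INSERT reports the collision and the
loser frees its copy.  A single-threaded sketch of the same find-or-create
shape using POSIX tsearch(), which folds the lookup and the insert into one
call so the retry collapses into simply adopting the winning node:

#include <search.h>	/* tsearch(): POSIX binary tree */
#include <stdlib.h>

struct resv {
	unsigned long base_offset;	/* tree key */
};

static int
resv_cmp(const void *a, const void *b)
{
	const struct resv *ra = a, *rb = b;

	if (ra->base_offset < rb->base_offset)
		return -1;
	if (ra->base_offset > rb->base_offset)
		return 1;
	return 0;
}

/*
 * Find the reservation covering base_offset, creating it if absent.
 * tsearch() inserts the new node only when no equal key exists, so on
 * a collision we free our copy and adopt the existing node.
 */
struct resv *
resv_find_or_create(void **root, unsigned long base_offset)
{
	struct resv *nres, **slot;

	nres = malloc(sizeof(*nres));
	if (nres == NULL)
		return NULL;
	nres->base_offset = base_offset;

	slot = (struct resv **)tsearch(nres, root, resv_cmp);
	if (slot == NULL) {
		free(nres);		/* tree node allocation failed */
		return NULL;
	}
	if (*slot != nres)
		free(nres);		/* key already present */
	return *slot;
}
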
+
+/*
+ * Enter the reservation on the on-delay list, or requeue it at the
+ * tail if it is already on the list.
+ */
+static void
+hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
+{
+       if (resv->flags & HAMMER_RESF_ONDELAY) {
                TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
                resv->flush_group = hmp->flusher.next + 1;
-               error = 0;
+               TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        } else {
-               ++resv->refs;   /* ref for on-delay list */
-               error = 0;
-       }
-       if (error == 0) {
+               ++resv->refs;
                ++hmp->rsv_fromdelay;
                resv->flags |= HAMMER_RESF_ONDELAY;
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        }
-       return(error);
 }
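
The rewritten setdelay pins the reservation to flush group
hmp->flusher.next + 1, so the big-block stays off-limits until the flush
cycle after the current one has retired; re-arming an already-delayed
reservation just moves it to the tail with a fresh flush group.  A toy
sketch of that queue discipline using <sys/queue.h>:

#include <sys/queue.h>

struct resv {
	int refs;
	int on_delay;			/* HAMMER_RESF_ONDELAY stand-in */
	int flush_group;
	TAILQ_ENTRY(resv) delay_entry;
};

TAILQ_HEAD(delay_list, resv);

/*
 * Arm (or re-arm) the delay: an entry already on the list moves to the
 * tail with a fresh flush group; a new entry additionally gains a
 * reference owned by the delay list itself.
 */
void
setdelay(struct delay_list *list, struct resv *resv, int next_flush_group)
{
	if (resv->on_delay) {
		TAILQ_REMOVE(list, resv, delay_entry);
	} else {
		++resv->refs;		/* ref held by the delay list */
		resv->on_delay = 1;
	}
	resv->flush_group = next_flush_group + 1;
	TAILQ_INSERT_TAIL(list, resv, delay_entry);
}
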
 
 void
@@ -646,6 +740,8 @@ hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
 
 /*
  * Backend function - free (offset, bytes) in a zone.
+ *
+ * XXX error return
  */
 void
 hammer_blockmap_free(hammer_transaction_t trans,
@@ -653,7 +749,6 @@ hammer_blockmap_free(hammer_transaction_t trans,
 {
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
-       hammer_reserve_t resv;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
@@ -695,11 +790,15 @@ hammer_blockmap_free(hammer_transaction_t trans,
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
-       KKASSERT(error == 0);
+       if (error)
+               goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER1");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
+                       panic("CRC FAILED: LAYER1");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        /*
@@ -708,9 +807,13 @@ hammer_blockmap_free(hammer_transaction_t trans,
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
-       KKASSERT(error == 0);
+       if (error)
+               goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER2");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
+                       panic("CRC FAILED: LAYER2");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        hammer_lock_ex(&hmp->blkmap_lock);
@@ -718,50 +821,34 @@ hammer_blockmap_free(hammer_transaction_t trans,
        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
 
        /*
-        * Freeing previously allocated space
+        * Free space previously allocated via blockmap_alloc().
         */
        KKASSERT(layer2->zone == zone);
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);
+
+       /*
+        * If a big-block becomes entirely free we must create a covering
+        * reservation to prevent premature reuse.  Note, however, that
+        * the big-block and/or reservation may still have an append_off
+        * that allows further (non-reused) allocations.
+        *
+        * Once the reservation has been made we re-check layer2 and if
+        * the big-block is still entirely free we reset the layer2 entry.
+        * The reservation will prevent premature reuse.
+        *
+        * NOTE: hammer_buffers are only invalidated when the reservation
+        * is completed, and only if the layer2 entry is still completely
+        * free at that time.  Any allocations from the reservation that
+        * occurred in the meantime, or active references on the
+        * reservation from new pending allocations, will prevent the
+        * invalidation from occurring.
+        */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                base_off = (zone_offset & (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) | HAMMER_ZONE_RAW_BUFFER;
-again:
-               resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
-                                base_off);
-               if (resv) {
-                       /*
-                        * Portions of this block have been reserved, do
-                        * not free it.
-                        *
-                        * Make sure the reservation remains through
-                        * the next flush cycle so potentially undoable
-                        * data is not overwritten.
-                        */
-                       KKASSERT(resv->zone == zone);
-                       hammer_reserve_setdelay(hmp, resv, base_off);
-               } else if ((blockmap->next_offset ^ zone_offset) &
-                           ~HAMMER_LARGEBLOCK_MASK64) {
-                       /*
-                        * Our iterator is not in the now-free big-block
-                        * and we can release it.
-                        *
-                        * Make sure the reservation remains through
-                        * the next flush cycle so potentially undoable
-                        * data is not overwritten.
-                        */
-                       if (hammer_reserve_setdelay(hmp, NULL, base_off))
-                               goto again;
-                       KKASSERT(layer2->zone == zone);
-                       /*
-                        * XXX maybe incorporate this del call in the
-                        * release code by setting base_offset, bytes_freed,
-                        * etc.
-                        */
-                       hammer_del_buffers(hmp,
-                                          zone_offset &
-                                             ~HAMMER_LARGEBLOCK_MASK64,
-                                          base_off,
-                                          HAMMER_LARGEBLOCK_SIZE);
+
+               hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
+               if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        hammer_modify_buffer(trans, buffer1,
@@ -779,11 +866,11 @@ again:
                        hammer_modify_volume_done(trans->rootvol);
                }
        }
-
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);
 
+failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
@@ -795,8 +882,9 @@ again:
  *
  * Allocate space that was previously reserved by the frontend.
  */
-void
+int
 hammer_blockmap_finalize(hammer_transaction_t trans,
+                        hammer_reserve_t resv,
                         hammer_off_t zone_offset, int bytes)
 {
        hammer_mount_t hmp;
@@ -814,7 +902,7 @@ hammer_blockmap_finalize(hammer_transaction_t trans,
        int offset;
 
        if (bytes == 0)
-               return;
+               return(0);
        hmp = trans->hmp;
 
        /*
@@ -840,11 +928,15 @@ hammer_blockmap_finalize(hammer_transaction_t trans,
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
-       KKASSERT(error == 0);
+       if (error)
+               goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER1");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
+                       panic("CRC FAILED: LAYER1");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        /*
@@ -853,9 +945,13 @@ hammer_blockmap_finalize(hammer_transaction_t trans,
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
-       KKASSERT(error == 0);
+       if (error)
+               goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER2");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
+                       panic("CRC FAILED: LAYER2");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        hammer_lock_ex(&hmp->blkmap_lock);
@@ -888,7 +984,10 @@ hammer_blockmap_finalize(hammer_transaction_t trans,
        if (layer2->zone != zone)
                kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
        KKASSERT(layer2->zone == zone);
+       KKASSERT(bytes != 0);
        layer2->bytes_free -= bytes;
+       if (resv)
+               resv->flags &= ~HAMMER_RESF_LAYER2FREE;
 
        /*
         * Finalizations can occur out of order, or combined with allocations.
@@ -902,10 +1001,12 @@ hammer_blockmap_finalize(hammer_transaction_t trans,
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);
 
+failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
+       return(error);
 }
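
With hammer_blockmap_finalize() returning int instead of void, backend
callers are expected to propagate hammer_bread() failures rather than dying
on a KKASSERT.  A hypothetical call site (finalize_record() is illustrative
and not part of this commit) showing the intended contract:

/*
 * Hypothetical backend call site: finalize previously reserved space
 * and bubble any I/O error up instead of asserting on it.
 */
static int
finalize_record(hammer_transaction_t trans, hammer_reserve_t resv,
		hammer_off_t zone_offset, int bytes)
{
	int error;

	error = hammer_blockmap_finalize(trans, resv, zone_offset, bytes);
	if (error)
		return (error);		/* e.g. EIO from hammer_bread() */
	/* ... continue committing the record ... */
	return (0);
}
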
 
 /*
@@ -943,21 +1044,35 @@ hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
-       KKASSERT(*errorp == 0);
+       if (*errorp) {
+               bytes = 0;
+               goto failed;
+       }
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER1");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
+                       panic("CRC FAILED: LAYER1");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        /*
         * Dive layer 2, each entry represents a large-block.
+        *
+        * (reuse buffer, layer1 pointer becomes invalid)
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
-       KKASSERT(*errorp == 0);
+       if (*errorp) {
+               bytes = 0;
+               goto failed;
+       }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER2");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
+                       panic("CRC FAILED: LAYER2");
+               hammer_unlock(&hmp->blkmap_lock);
        }
        KKASSERT(layer2->zone == zone);
 
@@ -967,12 +1082,13 @@ hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
                *curp = 0;
        else
                *curp = 1;
+failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
-                       zone_offset, bytes);
+                       (long long)zone_offset, bytes);
        }
        return(bytes);
 }
@@ -1030,10 +1146,14 @@ hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
-       KKASSERT(*errorp == 0);
+       if (*errorp)
+               goto failed;
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER1");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
+                       panic("CRC FAILED: LAYER1");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
        /*
@@ -1043,7 +1163,8 @@ hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
 
-       KKASSERT(*errorp == 0);
+       if (*errorp)
+               goto failed;
        if (layer2->zone == 0) {
                base_off = (zone_offset & (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) | HAMMER_ZONE_RAW_BUFFER;
                resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
@@ -1055,15 +1176,19 @@ hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                        layer2->zone, zone);
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
-               Debugger("CRC FAILED: LAYER2");
+               hammer_lock_ex(&hmp->blkmap_lock);
+               if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
+                       panic("CRC FAILED: LAYER2");
+               hammer_unlock(&hmp->blkmap_lock);
        }
 
+failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
-                       zone_offset, result_offset);
+                       (long long)zone_offset, (long long)result_offset);
        }
        return(result_offset);
 }
@@ -1073,36 +1198,28 @@ hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
  * Check space availability
  */
 int
-hammer_checkspace(hammer_mount_t hmp, int slop)
+_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp)
 {
        const int in_size = sizeof(struct hammer_inode_data) +
                            sizeof(union hammer_btree_elm);
        const int rec_size = (sizeof(union hammer_btree_elm) * 2);
        int64_t usedbytes;
 
-       /*
-        * Hopefully a quick and fast check.
-        */
-       if (hmp->copy_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE >=
-           (int64_t)hidirtybufspace * 4 + 10 * HAMMER_LARGEBLOCK_SIZE) {
-               hammer_count_extra_space_used = -1;
-               return(0);
-       }
-
-       /*
-        * Do a more involved check
-        */
        usedbytes = hmp->rsv_inodes * in_size +
                    hmp->rsv_recs * rec_size +
                    hmp->rsv_databytes +
-                   hmp->rsv_fromdelay * HAMMER_LARGEBLOCK_SIZE +
-                   hidirtybufspace +
-                   slop * HAMMER_LARGEBLOCK_SIZE;
+                   ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
+                   ((int64_t)hidirtybufspace << 2) +
+                   (slop << HAMMER_LARGEBLOCK_BITS);
 
-       hammer_count_extra_space_used = usedbytes;
+       hammer_count_extra_space_used = usedbytes;      /* debugging */
+       if (resp)
+               *resp = usedbytes;
 
-       if (hmp->copy_stat_freebigblocks >= usedbytes / HAMMER_LARGEBLOCK_SIZE)
+       if (hmp->copy_stat_freebigblocks >=
+           (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
                return(0);
+       }
        return (ENOSPC);
 }
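
The rewrite drops the separate fast-path test, folds everything into a
single estimate, and swaps the big-block multiplies and divides for shifts
by HAMMER_LARGEBLOCK_BITS (23, assuming the traditional 8MB big-blocks);
note the dirty-buffer term is now weighted by four, matching the removed
fast path.  A self-contained check that the shift forms match the
equivalent multiply/divide arithmetic:

#include <assert.h>
#include <stdint.h>

#define LARGEBLOCK_BITS	23			/* assumed: 8MB big-blocks */
#define LARGEBLOCK_SIZE	(1 << LARGEBLOCK_BITS)

int
main(void)
{
	int64_t fromdelay = 37;			/* big-blocks on delay list */
	int64_t hidirtybufspace = 100 * 1024 * 1024;
	int slop = 16;				/* caller-supplied slop */

	/* The shift forms used by _hammer_checkspace() ... */
	int64_t usedbytes = (fromdelay << LARGEBLOCK_BITS) +
			    (hidirtybufspace << 2) +
			    ((int64_t)slop << LARGEBLOCK_BITS);

	/* ... match the equivalent multiply/divide arithmetic. */
	assert(usedbytes == fromdelay * LARGEBLOCK_SIZE +
			    hidirtybufspace * 4 +
			    (int64_t)slop * LARGEBLOCK_SIZE);

	/* The free-space test compares whole big-blocks, rounding down. */
	assert((usedbytes >> LARGEBLOCK_BITS) == usedbytes / LARGEBLOCK_SIZE);
	return 0;
}
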