diff --git a/sys/vfs/hammer/hammer_blockmap.c b/sys/vfs/hammer/hammer_blockmap.c
index b3d9fbeeac..bff567f92c 100644
--- a/sys/vfs/hammer/hammer_blockmap.c
+++ b/sys/vfs/hammer/hammer_blockmap.c
@@ -40,10 +40,10 @@
 #include "hammer.h"
 
 static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
-static void hammer_reserve_setdelay(hammer_mount_t hmp,
-			hammer_off_t base_offset,
+static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
+			hammer_off_t base_offset, int zone,
 			struct hammer_blockmap_layer2 *layer2);
-
+static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);
 
 /*
  * Reserved big-blocks red-black tree support
@@ -65,8 +65,8 @@ hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
  * Allocate bytes from a zone
  */
 hammer_off_t
-hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
-		      int bytes, int *errorp)
+hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
+		      hammer_off_t hint, int *errorp)
 {
 	hammer_mount_t hmp;
 	hammer_volume_t root_volume;
@@ -86,6 +86,7 @@ hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
 	hammer_off_t base_off;
 	int loops = 0;
 	int offset;		/* offset within big-block */
+	int use_hint;
 
 	hmp = trans->hmp;
@@ -108,8 +109,26 @@ hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
 	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
 	KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
 
-	next_offset = blockmap->next_offset;
+	/*
+	 * Use the hint if we have one.
+	 */
+	if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
+		next_offset = (hint + 15) & ~(hammer_off_t)15;
+		use_hint = 1;
+	} else {
+		next_offset = blockmap->next_offset;
+		use_hint = 0;
+	}
 again:
+
+	/*
+	 * use_hint is turned off if we leave the hinted big-block.
+	 */
+	if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
+		next_offset = blockmap->next_offset;
+		use_hint = 0;
+	}
+
 	/*
 	 * Check for wrap
 	 */
@@ -145,6 +164,17 @@ again:
 	 */
 	layer1_offset = freemap->phys_offset +
 			HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
+
+	/*
+	 * Skip this block if it belongs to a volume that we are
+	 * currently trying to remove from the file-system.
+	 */
+	if ((int)HAMMER_VOL_DECODE(layer1_offset) == hmp->volume_to_remove) {
+		next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
+			      ~HAMMER_BLOCKMAP_LAYER2_MASK;
+		goto again;
+	}
+
 	layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
 	if (*errorp) {
 		result_offset = 0;
@@ -207,6 +237,26 @@ again:
 		goto again;
 	}
 
+	/*
+	 * If operating in the current non-hint blockmap block, do not
+	 * allow it to get over-full.  Also drop any active hinting so
+	 * blockmap->next_offset is updated at the end.
+	 *
+	 * We do this for B-Tree and meta-data allocations to provide
+	 * localization for updates.
+	 */
+	if ((zone == HAMMER_ZONE_BTREE_INDEX ||
+	     zone == HAMMER_ZONE_META_INDEX) &&
+	    offset >= HAMMER_LARGEBLOCK_OVERFILL &&
+	    !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
+	) {
+		if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
+			next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
+			use_hint = 0;
+			goto again;
+		}
+	}
+
 	/*
 	 * We need the lock from this point on.  We have to re-check zone
 	 * ownership after acquiring the lock and also check for reservations.
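The hunks above give hammer_blockmap_alloc() an allocation hint: the caller-supplied offset is rounded up to a 16-byte boundary and used as the starting search position, and the hint is dropped in favour of blockmap->next_offset as soon as the search wanders out of the hinted block (or hinting is abandoned for an over-full B-Tree/meta-data big-block). The fragment below is a minimal, self-contained sketch of just that arithmetic; the DEMO_* constants (including the 4MB hint-block size) and the demo_* helpers are invented for illustration, and only the 16-byte rounding and the XOR/mask test are taken from the diff.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hammer_off_t;		/* matches the 64-bit offsets above */

/*
 * Assumed for the demonstration only; the real HAMMER_HINTBLOCK_MASK64
 * is defined in hammer.h and need not describe a 4MB window.
 */
#define DEMO_HINTBLOCK_SIZE	(4ULL * 1024 * 1024)
#define DEMO_HINTBLOCK_MASK64	(DEMO_HINTBLOCK_SIZE - 1)

/*
 * Round the caller's hint up to a 16-byte boundary, exactly as the new
 * code does before starting its search at the hinted offset.
 */
static hammer_off_t
demo_align_hint(hammer_off_t hint)
{
	return ((hint + 15) & ~(hammer_off_t)15);
}

/*
 * Non-zero once next_offset has wandered outside the hinted block, which
 * is the condition the patch uses to turn use_hint back off.
 */
static int
demo_left_hinted_block(hammer_off_t next_offset, hammer_off_t hint)
{
	return (((next_offset ^ hint) & ~DEMO_HINTBLOCK_MASK64) != 0);
}

int
main(void)
{
	hammer_off_t hint = 0x1003;
	hammer_off_t start = demo_align_hint(hint);

	printf("aligned hint: %#jx\n", (uintmax_t)start);
	printf("still hinted: %d\n", !demo_left_hinted_block(start, hint));
	printf("left block:   %d\n",
	       demo_left_hinted_block(start + DEMO_HINTBLOCK_SIZE, hint));
	return (0);
}

The same mask test is what keeps a stale or distant hint from dragging the search far away: one failed comparison and the allocator falls back to its normal rotating next_offset.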
@@ -244,6 +294,7 @@ again: next_offset += resv->append_off - offset; goto again; } + ++resv->refs; } /* @@ -285,10 +336,17 @@ again: hammer_modify_buffer_done(buffer2); KKASSERT(layer2->bytes_free >= 0); + /* + * We hold the blockmap lock and should be the only ones + * capable of modifying resv->append_off. Track the allocation + * as appropriate. + */ + KKASSERT(bytes != 0); if (resv) { KKASSERT(resv->append_off <= offset); resv->append_off = offset + bytes; resv->flags &= ~HAMMER_RESF_LAYER2FREE; + hammer_blockmap_reserve_complete(hmp, resv); } /* @@ -302,11 +360,15 @@ again: result_offset = next_offset; /* - * Process allocated result_offset + * If we weren't supplied with a hint or could not use the hint + * then we wound up using blockmap->next_offset as the hint and + * need to save it. */ - hammer_modify_volume(NULL, root_volume, NULL, 0); - blockmap->next_offset = next_offset + bytes; - hammer_modify_volume_done(root_volume); + if (use_hint == 0) { + hammer_modify_volume(NULL, root_volume, NULL, 0); + blockmap->next_offset = next_offset + bytes; + hammer_modify_volume_done(root_volume); + } hammer_unlock(&hmp->blkmap_lock); failed: @@ -557,19 +619,6 @@ failed: return(resv); } -#if 0 -/* - * Backend function - undo a portion of a reservation. - */ -void -hammer_blockmap_reserve_undo(hammer_mount_t hmp, hammer_reserve_t resv, - hammer_off_t zone_offset, int bytes) -{ - resv->bytes_freed += bytes; -} - -#endif - /* * Dereference a reservation structure. Upon the final release the * underlying big-block is checked and if it is entirely free we delete @@ -580,6 +629,7 @@ void hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv) { hammer_off_t base_offset; + int error; KKASSERT(resv->refs > 0); KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) == @@ -589,14 +639,20 @@ hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv) * Setting append_off to the max prevents any new allocations * from occuring while we are trying to dispose of the reservation, * allowing us to safely delete any related HAMMER buffers. + * + * If we are unable to clean out all related HAMMER buffers we + * requeue the delay. */ if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) { resv->append_off = HAMMER_LARGEBLOCK_SIZE; - resv->flags &= ~HAMMER_RESF_LAYER2FREE; - base_offset = resv->zone_offset & ~HAMMER_ZONE_RAW_BUFFER; - base_offset = HAMMER_ZONE_ENCODE(base_offset, resv->zone); - hammer_del_buffers(hmp, base_offset, resv->zone_offset, - HAMMER_LARGEBLOCK_SIZE); + base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK; + base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset); + error = hammer_del_buffers(hmp, base_offset, + resv->zone_offset, + HAMMER_LARGEBLOCK_SIZE, + 0); + if (error) + hammer_reserve_setdelay(hmp, resv); } if (--resv->refs == 0) { KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0); @@ -611,26 +667,33 @@ hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv) * the related flushes have completely cycled, otherwise crash recovery * could resurrect a data block that was already reused and overwritten. * - * Return 0 if the layer2 entry is still completely free after the - * reservation has been allocated. + * The caller might reset the underlying layer2 entry's append_off to 0, so + * our covering append_off must be set to max to prevent any reallocation + * until after the flush delays complete, not to mention proper invalidation + * of any underlying cached blocks. 
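The hunks above make the allocator take an extra reference on any reservation covering the chosen big-block and drop it through hammer_blockmap_reserve_complete(), which now parks append_off at the maximum, tries hammer_del_buffers(), and requeues the reservation with hammer_reserve_setdelay() when the related buffers cannot all be thrown away. What follows is a minimal sketch of that release-and-retry pattern, not HAMMER code: the demo_* names stand in for the structures and functions named in the diff and are assumptions made for the example.

#include <stdio.h>

/*
 * Stand-in for hammer_reserve: just enough state to show the final-release
 * path.  layer2_free mirrors the HAMMER_RESF_LAYER2FREE flag.
 */
struct demo_resv {
	int	refs;
	int	layer2_free;
};

/* Stand-in for hammer_del_buffers(); returns 0 on success, else an error. */
static int
demo_del_buffers(void)
{
	return (0);
}

/* Stand-in for hammer_reserve_setdelay(): requeue for a later flush cycle. */
static void
demo_setdelay(struct demo_resv *resv)
{
	printf("requeued for a later flush, refs=%d\n", resv->refs);
}

static void
demo_reserve_complete(struct demo_resv *resv)
{
	int error;

	/*
	 * On what should be the final reference, try to throw away the
	 * cached buffers covering the big-block.  If that fails, requeue
	 * the reservation instead of losing track of the stale buffers.
	 */
	if (resv->refs == 1 && resv->layer2_free) {
		error = demo_del_buffers();
		if (error)
			demo_setdelay(resv);
	}
	if (--resv->refs == 0)
		printf("last reference dropped, reservation can be freed\n");
}

int
main(void)
{
	struct demo_resv resv = { .refs = 1, .layer2_free = 1 };

	demo_reserve_complete(&resv);
	return (0);
}

Requeuing rather than simply clearing the flag keeps the big-block off limits until a later flush cycle can retire the buffers, which is the crash-recovery concern spelled out in the comment being extended here.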
*/ static void -hammer_reserve_setdelay(hammer_mount_t hmp, hammer_off_t base_offset, - struct hammer_blockmap_layer2 *layer2) +hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset, + int zone, struct hammer_blockmap_layer2 *layer2) { hammer_reserve_t resv; /* * Allocate the reservation if necessary. + * + * NOTE: need lock in future around resv lookup/allocation and + * the setdelay call, currently refs is not bumped until the call. */ again: resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset); if (resv == NULL) { resv = kmalloc(sizeof(*resv), hmp->m_misc, M_WAITOK | M_ZERO | M_USE_RESERVE); + resv->zone = zone; resv->zone_offset = base_offset; resv->refs = 0; - /* XXX inherent lock until refs bumped later on */ + resv->append_off = HAMMER_LARGEBLOCK_SIZE; + if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) resv->flags |= HAMMER_RESF_LAYER2FREE; if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) { @@ -638,12 +701,20 @@ again: goto again; } ++hammer_count_reservations; + } else { + if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) + resv->flags |= HAMMER_RESF_LAYER2FREE; } + hammer_reserve_setdelay(hmp, resv); +} - /* - * Enter the reservation on the on-delay list, or move it if it - * is already on the list. - */ +/* + * Enter the reservation on the on-delay list, or move it if it + * is already on the list. + */ +static void +hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv) +{ if (resv->flags & HAMMER_RESF_ONDELAY) { TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry); resv->flush_group = hmp->flusher.next + 1; @@ -776,7 +847,7 @@ hammer_blockmap_free(hammer_transaction_t trans, if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) { base_off = (zone_offset & (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) | HAMMER_ZONE_RAW_BUFFER; - hammer_reserve_setdelay(hmp, base_off, layer2); + hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2); if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) { layer2->zone = 0; layer2->append_off = 0; @@ -913,6 +984,7 @@ hammer_blockmap_finalize(hammer_transaction_t trans, if (layer2->zone != zone) kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone); KKASSERT(layer2->zone == zone); + KKASSERT(bytes != 0); layer2->bytes_free -= bytes; if (resv) resv->flags &= ~HAMMER_RESF_LAYER2FREE; @@ -1016,7 +1088,7 @@ failed: hammer_rel_volume(root_volume, 0); if (hammer_debug_general & 0x0800) { kprintf("hammer_blockmap_getfree: %016llx -> %d\n", - zone_offset, bytes); + (long long)zone_offset, bytes); } return(bytes); } @@ -1116,7 +1188,7 @@ failed: hammer_rel_volume(root_volume, 0); if (hammer_debug_general & 0x0800) { kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n", - zone_offset, result_offset); + (long long)zone_offset, (long long)result_offset); } return(result_offset); } @@ -1126,7 +1198,7 @@ failed: * Check space availability */ int -hammer_checkspace(hammer_mount_t hmp, int slop) +_hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp) { const int in_size = sizeof(struct hammer_inode_data) + sizeof(union hammer_btree_elm); @@ -1141,6 +1213,8 @@ hammer_checkspace(hammer_mount_t hmp, int slop) (slop << HAMMER_LARGEBLOCK_BITS); hammer_count_extra_space_used = usedbytes; /* debugging */ + if (resp) + *resp = usedbytes; if (hmp->copy_stat_freebigblocks >= (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
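The final hunk renames hammer_checkspace() to _hammer_checkspace() and threads an optional int64_t *resp out-parameter through it so callers can retrieve the usage estimate as well as the pass/fail answer. The sketch below illustrates only that out-parameter convention; DEMO_LARGEBLOCK_BITS assumes 8MB big-blocks and the demo_checkspace() signature is invented for the example rather than copied from hammer.h.

#include <stdint.h>
#include <stdio.h>

#define DEMO_LARGEBLOCK_BITS	23	/* assumed: 8MB big-blocks */

/*
 * Return non-zero when the estimated usage (pending bytes plus a slop of
 * whole big-blocks) still fits in the free big-blocks, and optionally
 * report the estimate through *resp, as _hammer_checkspace() now does.
 */
static int
demo_checkspace(int64_t freebigblocks, int64_t usedbytes, int slop,
		int64_t *resp)
{
	usedbytes += (int64_t)slop << DEMO_LARGEBLOCK_BITS;
	if (resp)
		*resp = usedbytes;
	return (freebigblocks >= (usedbytes >> DEMO_LARGEBLOCK_BITS));
}

int
main(void)
{
	int64_t est;

	/* 100 free big-blocks, ~12MB of pending meta-data, 8 blocks of slop */
	if (demo_checkspace(100, 12 << 20, 8, &est))
		printf("enough space, estimated use %jd bytes\n",
		       (intmax_t)est);
	else
		printf("would run out of space\n");
	return (0);
}

The (long long) casts added to the kprintf() calls in the surrounding hunks are a related portability detail: %016llx expects a long long argument, while hammer_off_t is a 64-bit typedef that is not guaranteed to be one.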