/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.5 2008/02/23 20:55:23 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
#include "hammer.h"

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_mount_t hmp, int zone, int bytes, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        int loops = 0;

        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes <= HAMMER_BUFSIZE);
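
        /*
         * Obtain the blockmap lock.  All blockmap allocations and frees
         * are serialized against each other through this lock.
         */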
        lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
        next_offset = rootmap->next_offset;

again:
        /*
         * The allocation request may not cross a buffer boundary.  The
         * XOR test below checks whether the first and last byte of the
         * request fall in different buffers; if so, push next_offset
         * forward to the start of the next buffer.
         */
        tmp_offset = next_offset + bytes;
        if ((next_offset ^ (tmp_offset - 1)) & ~HAMMER_BUFMASK64)
                next_offset = (tmp_offset - 1) & ~HAMMER_BUFMASK64;

        /*
         * Dive layer 1.  If we are starting a new layer 1 entry,
         * allocate a layer 2 block for it.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        KKASSERT(*errorp == 0);
        KKASSERT(next_offset <= rootmap->alloc_offset);

        /*
         * Allocate layer2 backing store in layer1 if necessary.  next_offset
         * can skip to a bigblock boundary but alloc_offset is at least
         * bigblock-aligned so that's ok.
         */
        if (next_offset == rootmap->alloc_offset &&
            ((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0 ||
             layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
        ) {
                KKASSERT((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0);
                hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = hammer_freemap_alloc(hmp, next_offset,
                                                           errorp);
                layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
                KKASSERT(*errorp == 0);
        }
        KKASSERT(layer1->phys_offset);

        /*
         * If layer1 indicates no free blocks in layer2 and our alloc_offset
         * is not in layer2, skip layer2 entirely.
         */
        if (layer1->blocks_free == 0 &&
            ((next_offset ^ rootmap->alloc_offset) &
             ~HAMMER_BLOCKMAP_LAYER2_MASK) != 0) {
                kprintf("blockmap skip1 %016llx\n", next_offset);
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2_MASK) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                if (next_offset >= hmp->zone_limits[zone]) {
                        kprintf("blockmap wrap1\n");
                        next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                        if (++loops == 2) {     /* XXX poor-man's */
                                next_offset = 0;
                                *errorp = ENOSPC;
                                goto done;
                        }
                }
                goto again;
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        KKASSERT(*errorp == 0);
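
        /*
         * When next_offset sits at the start of a big-block we may have
         * to allocate the big-block or skip past it, otherwise we are
         * simply appending to the big-block we are already in.
         */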
        if ((next_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                /*
                 * We are at the beginning of a new bigblock
                 */
                if (next_offset == rootmap->alloc_offset ||
                    layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
                        /*
                         * Allocate the bigblock in layer2 if diving into
                         * uninitialized space or if the block was previously
                         * freed.
                         */
                        hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                        KKASSERT(layer1->blocks_free);
                        --layer1->blocks_free;
                        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
                        bzero(layer2, sizeof(*layer2));
                        layer2->u.phys_offset =
                                hammer_freemap_alloc(hmp, next_offset, errorp);
                        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                        KKASSERT(*errorp == 0);
                } else if (layer2->bytes_free != HAMMER_LARGEBLOCK_SIZE) {
                        /*
                         * We have encountered a block that is already
                         * partially allocated.  We must skip this block.
                         */
                        kprintf("blockmap skip2 %016llx %d\n",
                                next_offset, layer2->bytes_free);
                        next_offset += HAMMER_LARGEBLOCK_SIZE;
                        if (next_offset >= hmp->zone_limits[zone]) {
                                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                                kprintf("blockmap wrap2\n");
                                if (++loops == 2) {     /* XXX poor-man's */
                                        next_offset = 0;
                                        *errorp = ENOSPC;
                                        goto done;
                                }
                        }
                        goto again;
                }
        } else {
                /*
                 * We are appending within a bigblock.
                 */
                KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
        }
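
        /*
         * Account for the allocation by subtracting it from the
         * big-block's free byte count.
         */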
        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free -= bytes;
        KKASSERT(layer2->bytes_free >= 0);

        /*
         * If the buffer was completely free we do not have to read it from
         * disk - call hammer_bnew() to instantiate it.
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                bigblock_offset = layer2->u.phys_offset +
                                  (next_offset & HAMMER_LARGEBLOCK_MASK64);
                hammer_bnew(hmp, bigblock_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        hammer_modify_volume(root_volume, rootmap, sizeof(*rootmap));
        rootmap->next_offset = next_offset + bytes;
        if (rootmap->alloc_offset < rootmap->next_offset) {
                rootmap->alloc_offset =
                    (rootmap->next_offset + HAMMER_LARGEBLOCK_MASK) &
                    ~HAMMER_LARGEBLOCK_MASK64;
        }
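
        /*
         * Cleanup.  Release any referenced buffers and the root volume,
         * then drop the blockmap lock.  On ENOSPC next_offset is 0.
         */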
done:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        lockmgr(&hmp->blockmap_lock, LK_RELEASE);
        return(next_offset);
}

/*
 * Free (offset,bytes) in a zone
 */
void
hammer_blockmap_free(hammer_mount_t hmp, hammer_off_t bmap_off, int bytes)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;

        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes <= HAMMER_BUFSIZE);
        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return;

        lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);

        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
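
        /*
         * The freed range may not cross a big-block boundary; the first
         * and last byte must fall within the same big-block.
         */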
        KKASSERT(((bmap_off ^ (bmap_off + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_free: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        KKASSERT(error == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);

        KKASSERT(error == 0);
        KKASSERT(layer2->u.phys_offset);
        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        /*
         * If the big-block is free, return it to the free pool.  If our
         * iterator is in the wholly free block, leave the block intact
         * and reset the iterator.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                if ((rootmap->next_offset ^ bmap_off) &
                    ~HAMMER_LARGEBLOCK_MASK64) {
                        hammer_freemap_free(hmp, layer2->u.phys_offset,
                                            bmap_off, &error);
                        layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;

                        hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
#if 0
                        /*
                         * XXX Not working yet - we aren't clearing it when
                         * reallocating the block later on.
                         */
                        if (layer1->blocks_free == HAMMER_BLOCKMAP_RADIX2) {
                                hammer_freemap_free(
                                        hmp, layer1->phys_offset,
                                        bmap_off & ~HAMMER_BLOCKMAP_LAYER2_MASK,
                                        &error);
                                layer1->phys_offset = HAMMER_BLOCKMAP_FREE;
                        }
#endif
                } else {
                        hammer_modify_volume(root_volume, rootmap,
                                             sizeof(*rootmap));
                        rootmap->next_offset &= ~HAMMER_LARGEBLOCK_MASK64;
                }
        }
done:
        lockmgr(&hmp->blockmap_lock, LK_RELEASE);

        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        hammer_rel_volume(root_volume, 0);
}

/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        int zone;

        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                result_offset = 0;
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        KKASSERT(*errorp == 0);
        KKASSERT(layer2->u.phys_offset);
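
        /*
         * The translation is the big-block's physical allocation plus
         * the offset of bmap_off within that big-block.
         */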
        result_offset = layer2->u.phys_offset +
                        (bmap_off & HAMMER_LARGEBLOCK_MASK64);

done:
        hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        bmap_off, result_offset);
        }
        return(result_offset);
}