HAMMER 38C/Many: Undo/Synchronization and crash recovery
[dragonfly.git] / sys / vfs / hammer / hammer_blockmap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.8 2008/04/25 21:49:49 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
#include "hammer.h"

static hammer_off_t hammer_find_hole(hammer_mount_t hmp,
                        hammer_holes_t holes, int bytes);
static void hammer_add_hole(hammer_mount_t hmp, hammer_holes_t holes,
                        hammer_off_t offset, int bytes);

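/*
 * Note (editor's sketch, inferred from the code below): the blockmap is
 * a two-layer radix structure.  HAMMER_BLOCKMAP_LAYER1_OFFSET(off)
 * indexes a layer1 entry relative to rootmap->phys_offset,
 * layer1->phys_offset plus HAMMER_BLOCKMAP_LAYER2_OFFSET(off) locates
 * the layer2 entry describing one large-block, and
 * (off & HAMMER_LARGEBLOCK_MASK64) is the byte offset within that
 * large-block.
 */
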
/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
                      int bytes, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        int loops = 0;
        int skip_amount;
        int used_hole;

        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(trans->hmp, errorp);
        if (*errorp)
                return(0);
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful: certain primary alignments are used below to
         * allocate new blockmap blocks.
         */
        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes > 0 && bytes <= HAMMER_BUFSIZE);

        lockmgr(&trans->hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);

        /*
         * Try to use a known-free hole, otherwise append.
         */
        next_offset = hammer_find_hole(trans->hmp, &trans->hmp->holes[zone],
                                       bytes);
        if (next_offset == 0) {
                next_offset = rootmap->next_offset;
                used_hole = 0;
        } else {
                used_hole = 1;
        }

again:
        /*
         * The allocation request may not cross a buffer boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                skip_amount = HAMMER_BUFSIZE -
                              ((int)next_offset & HAMMER_BUFMASK);
                hammer_add_hole(trans->hmp, &trans->hmp->holes[zone],
                                next_offset, skip_amount);
                next_offset = tmp_offset & ~HAMMER_BUFMASK64;
        }
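
        /*
         * Note (editor's): the XOR test above detects a buffer crossing.
         * If any bit above the buffer mask differs between the first and
         * last byte of the request, the request would span two buffers.
         * The unused tail of the current buffer is recorded as a hole so
         * smaller allocations can reclaim it later.
         */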

        /*
         * Dive layer 1.  If we are starting a new layer 1 entry,
         * allocate a layer 2 block for it.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(trans->hmp, layer1_offset, errorp, &buffer1);
        KKASSERT(*errorp == 0);
        KKASSERT(next_offset <= rootmap->alloc_offset);

        /*
         * Allocate layer2 backing store in layer1 if necessary.  next_offset
         * can skip to a bigblock boundary but alloc_offset is at least
         * bigblock-aligned so that's ok.
         */
        if (next_offset == rootmap->alloc_offset &&
            ((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0 ||
            layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
        ) {
                KKASSERT((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0);
                hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset =
                        hammer_freemap_alloc(trans, next_offset, errorp);
                layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
                hammer_modify_buffer_done(buffer1);
                KKASSERT(*errorp == 0);
        }
        KKASSERT(layer1->phys_offset);

        /*
         * If layer1 indicates no free blocks in layer2 and our alloc_offset
         * is not in layer2, skip layer2 entirely.
         */
        if (layer1->blocks_free == 0 &&
            ((next_offset ^ rootmap->alloc_offset) & ~HAMMER_BLOCKMAP_LAYER2_MASK) != 0) {
                kprintf("blockmap skip1 %016llx\n", next_offset);
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2_MASK) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                if (next_offset >= trans->hmp->zone_limits[zone]) {
                        kprintf("blockmap wrap1\n");
                        next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                        if (++loops == 2) {     /* XXX poor-man's */
                                next_offset = 0;
                                *errorp = ENOSPC;
                                goto done;
                        }
                }
                goto again;
        }
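
        /*
         * Note (editor's): the loops counter implements the poor-man's
         * wrap check flagged XXX above.  The iterator may wrap to the
         * start of the zone once; if it comes around a second time the
         * zone is treated as full and ENOSPC is returned.
         */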

        /*
         * Dive layer 2; each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(trans->hmp, layer2_offset, errorp, &buffer2);
        KKASSERT(*errorp == 0);

        if ((next_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                /*
                 * We are at the beginning of a new bigblock
                 */
                if (next_offset == rootmap->alloc_offset ||
                    layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
                        /*
                         * Allocate the bigblock in layer2 if diving into
                         * uninitialized space or if the block was previously
                         * freed.
                         */
                        hammer_modify_buffer(trans, buffer1,
                                             layer1, sizeof(*layer1));
                        KKASSERT(layer1->blocks_free);
                        --layer1->blocks_free;
                        hammer_modify_buffer_done(buffer1);
                        hammer_modify_buffer(trans, buffer2,
                                             layer2, sizeof(*layer2));
                        bzero(layer2, sizeof(*layer2));
                        layer2->u.phys_offset =
                                hammer_freemap_alloc(trans, next_offset,
                                                     errorp);
                        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                        hammer_modify_buffer_done(buffer2);
                        KKASSERT(*errorp == 0);
                } else if (layer2->bytes_free != HAMMER_LARGEBLOCK_SIZE) {
                        /*
                         * We have encountered a block that is already
                         * partially allocated.  We must skip this block.
                         */
                        kprintf("blockmap skip2 %016llx %d\n",
                                next_offset, layer2->bytes_free);
                        next_offset += HAMMER_LARGEBLOCK_SIZE;
                        if (next_offset >= trans->hmp->zone_limits[zone]) {
                                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                                kprintf("blockmap wrap2\n");
                                if (++loops == 2) {     /* XXX poor-man's */
                                        next_offset = 0;
                                        *errorp = ENOSPC;
                                        goto done;
                                }
                        }
                        goto again;
                }
        } else {
                /*
                 * We are appending within a bigblock.
                 */
                KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
        }

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free -= bytes;
        hammer_modify_buffer_done(buffer2);
        KKASSERT(layer2->bytes_free >= 0);

        /*
         * If the buffer was completely free we do not have to read it
         * from disk; call hammer_bnew() to instantiate it instead.
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                bigblock_offset = layer2->u.phys_offset +
                                  (next_offset & HAMMER_LARGEBLOCK_MASK64);
                hammer_bnew(trans->hmp, bigblock_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        if (used_hole == 0) {
                hammer_modify_volume(trans, root_volume,
                                     rootmap, sizeof(*rootmap));
                rootmap->next_offset = next_offset + bytes;
                if (rootmap->alloc_offset < rootmap->next_offset) {
                        rootmap->alloc_offset =
                            (rootmap->next_offset + HAMMER_LARGEBLOCK_MASK) &
                            ~HAMMER_LARGEBLOCK_MASK64;
                }
                hammer_modify_volume_done(root_volume);
        }
done:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        lockmgr(&trans->hmp->blockmap_lock, LK_RELEASE);
        return(next_offset);
}
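
/*
 * Example (editor's sketch, not part of the original source): a caller
 * holding an open transaction might allocate B-Tree node space with
 * something like
 *
 *      hammer_off_t node_offset;
 *      int error;
 *
 *      node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
 *                                          sizeof(struct hammer_node_ondisk),
 *                                          &error);
 *      if (error == 0)
 *              ...use node_offset...
 *
 * The returned offset is zone-encoded; hammer_blockmap_lookup() below
 * translates it to the underlying raw-buffer offset.
 */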

/*
 * Free (offset,bytes) in a zone
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
                     hammer_off_t bmap_off, int bytes)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;

        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes <= HAMMER_BUFSIZE);
        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(trans->hmp, &error);
        if (error)
                return;

        lockmgr(&trans->hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);

        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(((bmap_off ^ (bmap_off + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);
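
        /*
         * Note (editor's): the XOR assertion above verifies that the
         * freed range does not cross a large-block boundary; a single
         * free must be contained wholly within one large-block.
         */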
        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_free: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(trans->hmp, layer1_offset, &error, &buffer1);
        KKASSERT(error == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2; each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(trans->hmp, layer2_offset, &error, &buffer2);

        KKASSERT(error == 0);
        KKASSERT(layer2->u.phys_offset);
        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        /*
         * If the big-block is free, return it to the free pool.  If our
         * iterator is inside the wholly free block, leave the block
         * intact and reset the iterator.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                if ((rootmap->next_offset ^ bmap_off) &
                    ~HAMMER_LARGEBLOCK_MASK64) {
                        hammer_freemap_free(trans, layer2->u.phys_offset,
                                            bmap_off, &error);
                        layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;

                        hammer_modify_buffer(trans, buffer1,
                                             layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
#if 0
                        /*
                         * XXX Not working yet - we aren't clearing it when
                         * reallocating the block later on.
                         */
                        if (layer1->blocks_free == HAMMER_BLOCKMAP_RADIX2) {
                                hammer_freemap_free(
                                        trans, layer1->phys_offset,
                                        bmap_off & ~HAMMER_BLOCKMAP_LAYER2_MASK,
                                        &error);
                                layer1->phys_offset = HAMMER_BLOCKMAP_FREE;
                        }
#endif
                        hammer_modify_buffer_done(buffer1);
                } else {
                        /*
                         * Leave block intact and reset the iterator.
                         *
                         * XXX can't do this yet because if we allow data
                         * allocations they could overwrite deleted data
                         * that is still subject to an undo on reboot.
                         */
#if 0
                        hammer_modify_volume(trans, root_volume,
                                             rootmap, sizeof(*rootmap));
                        rootmap->next_offset &= ~HAMMER_LARGEBLOCK_MASK64;
                        hammer_modify_volume_done(root_volume);
#endif
                }
        }
        hammer_modify_buffer_done(buffer2);
done:
        lockmgr(&trans->hmp->blockmap_lock, LK_RELEASE);

        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        hammer_rel_volume(root_volume, 0);
}

/*
 * Return the number of free bytes in the big-block containing the
 * specified blockmap offset.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
                        int *curp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int bytes;
        int zone;

        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp) {
                *curp = 0;
                return(0);
        }
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_getfree: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                bytes = 0;
                *curp = 0;
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2; each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        KKASSERT(*errorp == 0);
        KKASSERT(layer2->u.phys_offset);

        bytes = layer2->bytes_free;

        if ((rootmap->next_offset ^ bmap_off) & ~HAMMER_LARGEBLOCK_MASK64)
                *curp = 0;
        else
                *curp = 1;
done:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
                        bmap_off, bytes);
        }
        return(bytes);
}
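
/*
 * Note (editor's): *curp is a boolean out-parameter.  It is set to 1
 * when the queried big-block is the one the zone's allocation iterator
 * (rootmap->next_offset) currently resides in, and 0 otherwise.
 */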

/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        int zone;

        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                result_offset = 0;
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2; each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        KKASSERT(*errorp == 0);
        KKASSERT(layer2->u.phys_offset);

        result_offset = layer2->u.phys_offset +
                        (bmap_off & HAMMER_LARGEBLOCK_MASK64);
done:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        bmap_off, result_offset);
        }
        return(result_offset);
}
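
/*
 * Example (editor's sketch, not part of the original source): reading
 * data behind a zone-encoded offset combines the lookup with a buffer
 * read:
 *
 *      hammer_off_t phys_offset;
 *      hammer_buffer_t buffer = NULL;
 *      void *data;
 *      int error;
 *
 *      phys_offset = hammer_blockmap_lookup(hmp, bmap_off, &error);
 *      if (error == 0)
 *              data = hammer_bread(hmp, phys_offset, &error, &buffer);
 *
 * where bmap_off stands for any offset previously handed out by
 * hammer_blockmap_alloc().
 */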

/************************************************************************
 *              IN-CORE TRACKING OF ALLOCATION HOLES                   *
 ************************************************************************
 *
 * This is a temporary shim in need of a more permanent solution.
 *
 * As we allocate space, holes are created due to having to align to a new
 * 16K buffer when an allocation would otherwise cross the buffer boundary.
 * These holes are recorded here and used to fulfill smaller requests as
 * much as possible.  Only a limited number of holes are recorded and these
 * functions operate somewhat like a heuristic, where information is allowed
 * to be thrown away.
 */

void
hammer_init_holes(hammer_mount_t hmp, hammer_holes_t holes)
{
        TAILQ_INIT(&holes->list);
        holes->count = 0;
}

void
hammer_free_holes(hammer_mount_t hmp, hammer_holes_t holes)
{
        hammer_hole_t hole;

        while ((hole = TAILQ_FIRST(&holes->list)) != NULL) {
                TAILQ_REMOVE(&holes->list, hole, entry);
                kfree(hole, M_HAMMER);
        }
}

/*
 * Attempt to locate a hole with sufficient free space to accommodate the
 * requested allocation.  Return the offset or 0 if no hole could be found.
 */
static hammer_off_t
hammer_find_hole(hammer_mount_t hmp, hammer_holes_t holes, int bytes)
{
        hammer_hole_t hole;
        hammer_off_t result_off = 0;

        TAILQ_FOREACH(hole, &holes->list, entry) {
                if (bytes <= hole->bytes) {
                        result_off = hole->offset;
                        hole->offset += bytes;
                        hole->bytes -= bytes;
                        break;
                }
        }
        return(result_off);
}
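
/*
 * Note (editor's): this is a first-fit scan.  The chosen hole is shrunk
 * in place rather than removed, so any remainder stays available for
 * later requests; exhausted holes (bytes == 0) simply stop matching and
 * are eventually recycled by hammer_add_hole().
 */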

/*
 * If a newly created hole is reasonably sized then record it.  We only
 * keep track of a limited number of holes.  Lost holes are recovered by
 * reblocking.
 */
static void
hammer_add_hole(hammer_mount_t hmp, hammer_holes_t holes,
                hammer_off_t offset, int bytes)
{
        hammer_hole_t hole;

        if (bytes <= 128)
                return;

        if (holes->count < HAMMER_MAX_HOLES) {
                hole = kmalloc(sizeof(*hole), M_HAMMER, M_WAITOK);
                ++holes->count;
        } else {
                hole = TAILQ_FIRST(&holes->list);
                TAILQ_REMOVE(&holes->list, hole, entry);
        }
        TAILQ_INSERT_TAIL(&holes->list, hole, entry);
        hole->offset = offset;
        hole->bytes = bytes;
}
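
/*
 * Note (editor's): when the table is full the oldest hole (the head of
 * the list) is recycled for the new one, giving FIFO replacement.
 * Holes thrown away here are not lost permanently; the space is
 * recovered when the zone is reblocked.
 */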