AMD64 - Fix many compile-time warnings. int/ptr type mismatches, %llx, etc.
[dragonfly.git] / sys / vfs / hammer / hammer_blockmap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.27 2008/07/31 22:30:33 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
#include "hammer.h"

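/*
 * Overview (summarizing what the routines below actually do): each zone
 * has a hammer_blockmap with a next_offset allocation iterator, and all
 * zones draw their backing store from the freemap.  A zone offset is
 * translated in two steps: the layer1 entry (located via
 * HAMMER_BLOCKMAP_LAYER1_OFFSET()) yields the physical offset of a
 * layer2 array plus a blocks_free count, and the layer2 entry (located
 * via HAMMER_BLOCKMAP_LAYER2_OFFSET()) describes one big-block: its
 * owning zone, append_off, and bytes_free, each entry guarded by a CRC.
 */
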
static int hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2);
static void hammer_reserve_setdelay_offset(hammer_mount_t hmp,
                    hammer_off_t base_offset, int zone,
                    struct hammer_blockmap_layer2 *layer2);
static void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv);

/*
 * Reserved big-blocks red-black tree support
 */
RB_GENERATE2(hammer_res_rb_tree, hammer_reserve, rb_node,
             hammer_res_rb_compare, hammer_off_t, zone_offset);

static int
hammer_res_rb_compare(hammer_reserve_t res1, hammer_reserve_t res2)
{
        if (res1->zone_offset < res2->zone_offset)
                return(-1);
        if (res1->zone_offset > res2->zone_offset)
                return(1);
        return(0);
}
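
/*
 * Note: RB_GENERATE2() (from <sys/tree.h>) also emits a keyed lookup
 * function for the zone_offset field, which is what lets the code below
 * call RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off)
 * directly on a hammer_off_t key instead of filling in a dummy node
 * for the comparison function.
 */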

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_transaction_t trans, int zone, int bytes,
                      hammer_off_t hint, int *errorp)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_reserve_t resv;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t result_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int loops = 0;
        int offset;             /* offset within big-block */
        int use_hint;

        hmp = trans->hmp;

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

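        /*
         * For illustration: (bytes + 15) & ~15 rounds the request up to
         * the next 16-byte boundary, e.g. 1 -> 16, 100 -> 112, and 112
         * stays 112.  The same idiom is applied to the hint below.
         */
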
        /*
         * Setup
         */
        root_volume = trans->rootvol;
        *errorp = 0;
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Use the hint if we have one.
         */
        if (hint && HAMMER_ZONE_DECODE(hint) == zone) {
                next_offset = (hint + 15) & ~(hammer_off_t)15;
                use_hint = 1;
        } else {
                next_offset = blockmap->next_offset;
                use_hint = 0;
        }
again:

        /*
         * use_hint is turned off if we leave the hinted big-block.
         */
        if (use_hint && ((next_offset ^ hint) & ~HAMMER_HINTBLOCK_MASK64)) {
                next_offset = blockmap->next_offset;
                use_hint = 0;
        }

        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        result_offset = 0;
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;
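
        /*
         * For illustration, the boundary test above (and the earlier
         * hint test) uses a common idiom: (a ^ b) & ~mask is non-zero
         * iff a and b differ in some bit above the mask, i.e. they fall
         * in different mask-sized blocks.  E.g. with a 16-bit mask,
         * a = 0x1fff0 and b = 0x20002 XOR to 0x3fff2, which survives
         * & ~0xffff, so that span crosses a 64KB boundary and the
         * iterator is bumped to the start of the next block.
         */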

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }
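
        /*
         * (The CRC is re-tested under blkmap_lock before panicking
         * because the unlocked read can race a writer that is in the
         * middle of updating the entry; the layer2 check below makes
         * the same point explicitly.)
         */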

        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2, skip to the next layer1 entry.
         */
        if (offset == 0 && layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp) {
                result_offset = 0;
                goto failed;
        }

        /*
         * Check CRC.  This can race another thread holding the lock
         * and in the middle of modifying layer2.
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * If operating in the current non-hint blockmap block, do not
         * allow it to get over-full.  Also drop any active hinting so
         * blockmap->next_offset is updated at the end.
         *
         * We do this for B-Tree and meta-data allocations to provide
         * localization for updates.
         */
        if ((zone == HAMMER_ZONE_BTREE_INDEX ||
             zone == HAMMER_ZONE_META_INDEX) &&
            offset >= HAMMER_LARGEBLOCK_OVERFILL &&
            !((next_offset ^ blockmap->next_offset) & ~HAMMER_LARGEBLOCK_MASK64)
        ) {
                if (offset >= HAMMER_LARGEBLOCK_OVERFILL) {
                        next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                        use_hint = 0;
                        goto again;
                }
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The big-block might be reserved by another zone.  If it is reserved
         * by our zone we may have to move next_offset past the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                    HAMMER_ZONE_RAW_BUFFER;
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
                ++resv->refs;
        }

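        /*
         * (base_off above is next_offset with the intra-big-block bits
         * masked off and the zone field rewritten to
         * HAMMER_ZONE_RAW_BUFFER, i.e. the zone-2 address of the
         * big-block.  Reservations are keyed on that form, which is why
         * every lookup in this file builds the key the same way.)
         */
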
        /*
         * Ok, we can allocate out of this layer2 big-block.  Assume ownership
         * of the layer for real.  At this point we've validated any
         * reservation that might exist and can just ignore resv.
         */
        if (layer2->zone == 0) {
                /*
                 * Assign the big-block to our zone
                 */
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans, trans->rootvol,
                                           vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                        root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        } else {
                hammer_modify_buffer(trans, buffer2,
                                     layer2, sizeof(*layer2));
        }
        KKASSERT(layer2->zone == zone);

        layer2->bytes_free -= bytes;
        KKASSERT(layer2->append_off <= offset);
        layer2->append_off = offset + bytes;
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        KKASSERT(layer2->bytes_free >= 0);

        /*
         * We hold the blockmap lock and should be the only ones
         * capable of modifying resv->append_off.  Track the allocation
         * as appropriate.
         */
        KKASSERT(bytes != 0);
        if (resv) {
                KKASSERT(resv->append_off <= offset);
                resv->append_off = offset + bytes;
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;
                hammer_blockmap_reserve_complete(hmp, resv);
        }

        /*
         * If we are allocating from the base of a new buffer we can avoid
         * a disk read by calling hammer_bnew().
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew_ext(trans->hmp, next_offset, bytes,
                                errorp, &buffer3);
        }
        result_offset = next_offset;

        /*
         * If we weren't supplied with a hint or could not use the hint
         * then we wound up using blockmap->next_offset as the hint and
         * need to save it.
         */
        if (use_hint == 0) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                blockmap->next_offset = next_offset + bytes;
                hammer_modify_volume_done(root_volume);
        }
        hammer_unlock(&hmp->blkmap_lock);
failed:

        /*
         * Cleanup
         */
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);

        return(result_offset);
}
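
/*
 * For illustration only (not a caller in this file): a backend consumer
 * would allocate zone space roughly like this, where zone is e.g.
 * HAMMER_ZONE_BTREE_INDEX and bytes/hint are whatever the caller is
 * working with:
 *
 *      hammer_off_t off;
 *      int error;
 *
 *      off = hammer_blockmap_alloc(trans, zone, bytes, hint, &error);
 *      if (error == 0)
 *              ... use the zone-X offset off ...
 *
 * A zero return with *errorp set to ENOSPC means the zone wrapped
 * without finding room.
 */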

/*
 * Frontend function - Reserve bytes in a zone.
 *
 * This code reserves bytes out of a blockmap without committing to any
 * meta-data modifications, allowing the front-end to directly issue disk
 * write I/O for large blocks of data.
 *
 * The backend later finalizes the reservation with hammer_blockmap_finalize()
 * upon committing the related record.
 */
hammer_reserve_t
hammer_blockmap_reserve(hammer_mount_t hmp, int zone, int bytes,
                        hammer_off_t *zone_offp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        hammer_reserve_t resx;
        int loops = 0;
        int offset;

        /*
         * Setup
         */
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(NULL);
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes > 0 && bytes <= HAMMER_XBUFSIZE);

        next_offset = blockmap->next_offset;
again:
        resv = NULL;
        /*
         * Check for wrap
         */
        if (next_offset == HAMMER_ZONE_ENCODE(zone + 1, 0)) {
                if (++loops == 2) {
                        *errorp = ENOSPC;
                        goto failed;
                }
                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
        }

        /*
         * The allocation request may not cross a buffer boundary.  Special
         * large allocations must not cross a large-block boundary.
         */
        tmp_offset = next_offset + bytes - 1;
        if (bytes <= HAMMER_BUFSIZE) {
                if ((next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64) {
                        next_offset = tmp_offset & ~HAMMER_BUFMASK64;
                        goto again;
                }
        } else {
                if ((next_offset ^ tmp_offset) & ~HAMMER_LARGEBLOCK_MASK64) {
                        next_offset = tmp_offset & ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
        }
        offset = (int)next_offset & HAMMER_LARGEBLOCK_MASK;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        if (*errorp)
                goto failed;

        /*
         * Check CRC.
         */
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * If we are at a big-block boundary and layer1 indicates no
         * free big-blocks, then we cannot allocate a new big-block in
         * layer2, skip to the next layer1 entry.
         */
        if ((next_offset & HAMMER_LARGEBLOCK_MASK) == 0 &&
            layer1->blocks_free == 0) {
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                goto again;
        }
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        if (*errorp)
                goto failed;

        /*
         * Check CRC if not allocating into uninitialized space (which we
         * aren't when reserving space).
         */
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Skip the layer if the zone is owned by someone other than us.
         */
        if (layer2->zone && layer2->zone != zone) {
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * We need the lock from this point on.  We have to re-check zone
         * ownership after acquiring the lock and also check for reservations.
         */
        hammer_lock_ex(&hmp->blkmap_lock);

        if (layer2->zone && layer2->zone != zone) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += (HAMMER_LARGEBLOCK_SIZE - offset);
                goto again;
        }
        if (offset < layer2->append_off) {
                hammer_unlock(&hmp->blkmap_lock);
                next_offset += layer2->append_off - offset;
                goto again;
        }

        /*
         * The big-block might be reserved by another zone.  If it is reserved
         * by our zone we may have to move next_offset past the append_off.
         */
        base_off = (next_offset &
                    (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                    HAMMER_ZONE_RAW_BUFFER;
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_off);
        if (resv) {
                if (resv->zone != zone) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset = (next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                      ~HAMMER_LARGEBLOCK_MASK64;
                        goto again;
                }
                if (offset < resv->append_off) {
                        hammer_unlock(&hmp->blkmap_lock);
                        next_offset += resv->append_off - offset;
                        goto again;
                }
                ++resv->refs;
                resx = NULL;
        } else {
                resx = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resx->refs = 1;
                resx->zone = zone;
                resx->zone_offset = base_off;
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resx->flags |= HAMMER_RESF_LAYER2FREE;
                resv = RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resx);
                KKASSERT(resv == NULL);
                resv = resx;
                ++hammer_count_reservations;
        }
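        /*
         * (RB_INSERT() returns NULL on success and the colliding node
         * otherwise; since blkmap_lock is held across both the lookup
         * and the insert here, a collision should be impossible, hence
         * the KKASSERT above.)
         */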
        resv->append_off = offset + bytes;

        /*
         * If we are not reserving a whole buffer but are at the start of
         * a new block, call hammer_bnew() to avoid a disk read.
         *
         * If we are reserving a whole buffer (or more), the caller will
         * probably use a direct read, so do nothing.
         */
        if (bytes < HAMMER_BUFSIZE && (next_offset & HAMMER_BUFMASK) == 0) {
                hammer_bnew(hmp, next_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        blockmap->next_offset = next_offset + bytes;
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        *zone_offp = next_offset;

        return(resv);
}
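
/*
 * For illustration only: the intended pairing, per the comments above,
 * is for the frontend to reserve and the backend to finalize once the
 * related record is committed, roughly:
 *
 *      resv = hammer_blockmap_reserve(hmp, zone, bytes,
 *                                     &zone_off, &error);
 *      ... frontend issues direct write I/O to zone_off ...
 *      error = hammer_blockmap_finalize(trans, resv, zone_off, bytes);
 *      hammer_blockmap_reserve_complete(hmp, resv);
 *
 * The actual call sites live elsewhere in the HAMMER code, so treat
 * this as a sketch of the contract, not a verbatim caller.
 */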

/*
 * Dereference a reservation structure.  Upon the final release the
 * underlying big-block is checked and if it is entirely free we delete
 * any related HAMMER buffers to avoid potential conflicts with future
 * reuse of the big-block.
 */
void
hammer_blockmap_reserve_complete(hammer_mount_t hmp, hammer_reserve_t resv)
{
        hammer_off_t base_offset;
        int error;

        KKASSERT(resv->refs > 0);
        KKASSERT((resv->zone_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);

        /*
         * Setting append_off to the max prevents any new allocations
         * from occurring while we are trying to dispose of the reservation,
         * allowing us to safely delete any related HAMMER buffers.
         *
         * If we are unable to clean out all related HAMMER buffers we
         * requeue the delay.
         */
        if (resv->refs == 1 && (resv->flags & HAMMER_RESF_LAYER2FREE)) {
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;
                base_offset = resv->zone_offset & ~HAMMER_OFF_ZONE_MASK;
                base_offset = HAMMER_ZONE_ENCODE(resv->zone, base_offset);
                error = hammer_del_buffers(hmp, base_offset,
                                           resv->zone_offset,
                                           HAMMER_LARGEBLOCK_SIZE,
                                           0);
                if (error)
                        hammer_reserve_setdelay(hmp, resv);
        }
        if (--resv->refs == 0) {
                KKASSERT((resv->flags & HAMMER_RESF_ONDELAY) == 0);
                RB_REMOVE(hammer_res_rb_tree, &hmp->rb_resv_root, resv);
                kfree(resv, hmp->m_misc);
                --hammer_count_reservations;
        }
}

/*
 * Prevent a potentially free big-block from being reused until after
 * the related flushes have completely cycled, otherwise crash recovery
 * could resurrect a data block that was already reused and overwritten.
 *
 * The caller might reset the underlying layer2 entry's append_off to 0, so
 * our covering append_off must be set to max to prevent any reallocation
 * until after the flush delays complete, not to mention proper invalidation
 * of any underlying cached blocks.
 */
static void
hammer_reserve_setdelay_offset(hammer_mount_t hmp, hammer_off_t base_offset,
                               int zone, struct hammer_blockmap_layer2 *layer2)
{
        hammer_reserve_t resv;

        /*
         * Allocate the reservation if necessary.
         *
         * NOTE: need lock in future around resv lookup/allocation and
         * the setdelay call, currently refs is not bumped until the call.
         */
again:
        resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root, base_offset);
        if (resv == NULL) {
                resv = kmalloc(sizeof(*resv), hmp->m_misc,
                               M_WAITOK | M_ZERO | M_USE_RESERVE);
                resv->zone = zone;
                resv->zone_offset = base_offset;
                resv->refs = 0;
                resv->append_off = HAMMER_LARGEBLOCK_SIZE;

                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resv->flags |= HAMMER_RESF_LAYER2FREE;
                if (RB_INSERT(hammer_res_rb_tree, &hmp->rb_resv_root, resv)) {
                        kfree(resv, hmp->m_misc);
                        goto again;
                }
                ++hammer_count_reservations;
        } else {
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
                        resv->flags |= HAMMER_RESF_LAYER2FREE;
        }
        hammer_reserve_setdelay(hmp, resv);
}
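
/*
 * (The lookup/kmalloc/RB_INSERT/goto-again pattern above is the
 * standard optimistic-insert loop: if another thread inserted the same
 * key while we were allocating, RB_INSERT() returns non-NULL, the
 * local copy is freed, and the lookup is retried.)
 */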

/*
 * Enter the reservation on the on-delay list, or move it if it
 * is already on the list.
 */
static void
hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
        if (resv->flags & HAMMER_RESF_ONDELAY) {
                TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        } else {
                ++resv->refs;
                ++hmp->rsv_fromdelay;
                resv->flags |= HAMMER_RESF_ONDELAY;
                resv->flush_group = hmp->flusher.next + 1;
                TAILQ_INSERT_TAIL(&hmp->delay_list, resv, delay_entry);
        }
}
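
/*
 * (flush_group is set to hmp->flusher.next + 1 so that, presumably, the
 * delayed reservation is not released until the flusher has advanced
 * past every group that could still reference the big-block; the
 * flusher is expected to call hammer_reserve_clrdelay() below when that
 * group retires.)
 */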

void
hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv)
{
        KKASSERT(resv->flags & HAMMER_RESF_ONDELAY);
        resv->flags &= ~HAMMER_RESF_ONDELAY;
        TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
        --hmp->rsv_fromdelay;
        hammer_blockmap_reserve_complete(hmp, resv);
}

/*
 * Backend function - free (offset, bytes) in a zone.
 *
 * XXX error return
 */
void
hammer_blockmap_free(hammer_transaction_t trans,
                     hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t base_off;
        int error;
        int zone;

        if (bytes == 0)
                return;
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);
        KKASSERT(((zone_offset ^ (zone_offset + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Free space previously allocated via blockmap_alloc().
         */
        KKASSERT(layer2->zone == zone);
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        /*
         * If a big-block becomes entirely free we must create a covering
         * reservation to prevent premature reuse.  Note, however, that
         * the big-block and/or reservation may still have an append_off
         * that allows further (non-reused) allocations.
         *
         * Once the reservation has been made we re-check layer2 and if
         * the big-block is still entirely free we reset the layer2 entry.
         * The reservation will prevent premature reuse.
         *
         * NOTE: hammer_buffer's are only invalidated when the reservation
         * is completed, if the layer2 entry is still completely free at
         * that time.  Any allocations from the reservation that may have
         * occurred in the meantime, or active references on the reservation
         * from new pending allocations, will prevent the invalidation from
         * occurring.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                            HAMMER_ZONE_RAW_BUFFER;

                hammer_reserve_setdelay_offset(hmp, base_off, zone, layer2);
                if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        hammer_modify_buffer(trans, buffer1,
                                             layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        hammer_modify_buffer_done(buffer1);
                        hammer_modify_volume_field(trans,
                                        trans->rootvol,
                                        vol0_stat_freebigblocks);
                        ++root_volume->ondisk->vol0_stat_freebigblocks;
                        hmp->copy_stat_freebigblocks =
                                root_volume->ondisk->vol0_stat_freebigblocks;
                        hammer_modify_volume_done(trans->rootvol);
                }
        }
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
}

/*
 * Backend function - finalize (offset, bytes) in a zone.
 *
 * Allocate space that was previously reserved by the frontend.
 */
int
hammer_blockmap_finalize(hammer_transaction_t trans,
                         hammer_reserve_t resv,
                         hammer_off_t zone_offset, int bytes)
{
        hammer_mount_t hmp;
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;
        int offset;

        if (bytes == 0)
                return(0);
        hmp = trans->hmp;

        /*
         * Alignment
         */
        bytes = (bytes + 15) & ~15;
        KKASSERT(bytes <= HAMMER_XBUFSIZE);

        /*
         * Basic zone validation & locking
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = trans->rootvol;
        error = 0;

        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        if (error)
                goto failed;
        KKASSERT(layer1->phys_offset &&
                 layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
        if (error)
                goto failed;
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

        hammer_lock_ex(&hmp->blkmap_lock);

        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));

        /*
         * Finalize some or all of the space covered by a current
         * reservation.  An allocation in the same layer may have
         * already assigned ownership.
         */
        if (layer2->zone == 0) {
                hammer_modify_buffer(trans, buffer1,
                                     layer1, sizeof(*layer1));
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1,
                                           HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
                layer2->zone = zone;
                KKASSERT(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                KKASSERT(layer2->append_off == 0);
                hammer_modify_volume_field(trans,
                                trans->rootvol,
                                vol0_stat_freebigblocks);
                --root_volume->ondisk->vol0_stat_freebigblocks;
                hmp->copy_stat_freebigblocks =
                        root_volume->ondisk->vol0_stat_freebigblocks;
                hammer_modify_volume_done(trans->rootvol);
        }
        if (layer2->zone != zone)
                kprintf("layer2 zone mismatch %d %d\n", layer2->zone, zone);
        KKASSERT(layer2->zone == zone);
        KKASSERT(bytes != 0);
        layer2->bytes_free -= bytes;
        if (resv)
                resv->flags &= ~HAMMER_RESF_LAYER2FREE;

        /*
         * Finalizations can occur out of order, or combined with allocations.
         * append_off must be set to the highest allocated offset.
         */
        offset = ((int)zone_offset & HAMMER_LARGEBLOCK_MASK) + bytes;
        if (layer2->append_off < offset)
                layer2->append_off = offset;

        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
        hammer_modify_buffer_done(buffer2);
        hammer_unlock(&hmp->blkmap_lock);

failed:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        return(error);
}

/*
 * Return the number of free bytes in the big-block containing the
 * specified blockmap offset.
 */
int
hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
                        int *curp, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int bytes;
        int zone;

        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp) {
                *curp = 0;
                return(0);
        }
        blockmap = &hmp->blockmap[zone];
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        KKASSERT(layer1->phys_offset);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         *
         * (reuse buffer, layer1 pointer becomes invalid)
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
        if (*errorp) {
                bytes = 0;
                goto failed;
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }
        KKASSERT(layer2->zone == zone);

        bytes = layer2->bytes_free;

        if ((blockmap->next_offset ^ zone_offset) & ~HAMMER_LARGEBLOCK_MASK64)
                *curp = 0;
        else
                *curp = 1;
failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_getfree: %016llx -> %d\n",
                        (long long)zone_offset, bytes);
        }
        return(bytes);
}
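
/*
 * (*curp, as set above, simply reports whether the queried big-block is
 * the one the zone's next_offset iterator currently points into, using
 * the same XOR/mask same-block test seen in the allocators.)
 */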

/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
                       int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        hammer_off_t base_off;
        hammer_reserve_t resv;
        int zone;

        /*
         * Calculate the zone-2 offset.
         */
        zone = HAMMER_ZONE_DECODE(zone_offset);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);

        result_offset = (zone_offset & ~HAMMER_OFF_ZONE_MASK) |
                        HAMMER_ZONE_RAW_BUFFER;

        /*
         * We can actually stop here, normal blockmaps are now direct-mapped
         * onto the freemap and so represent zone-2 addresses.
         */
        if (hammer_verify_zone == 0) {
                *errorp = 0;
                return(result_offset);
        }

        /*
         * Validate the allocation zone
         */
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        KKASSERT(freemap->phys_offset != 0);

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        if (*errorp)
                goto failed;
        KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer1->layer1_crc != crc32(layer1, HAMMER_LAYER1_CRCSIZE))
                        panic("CRC FAILED: LAYER1");
                hammer_unlock(&hmp->blkmap_lock);
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        if (*errorp)
                goto failed;
        if (layer2->zone == 0) {
                base_off = (zone_offset &
                            (~HAMMER_LARGEBLOCK_MASK64 & ~HAMMER_OFF_ZONE_MASK)) |
                            HAMMER_ZONE_RAW_BUFFER;
                resv = RB_LOOKUP(hammer_res_rb_tree, &hmp->rb_resv_root,
                                 base_off);
                KKASSERT(resv && resv->zone == zone);
        } else if (layer2->zone != zone) {
                panic("hammer_blockmap_lookup: bad zone %d/%d\n",
                      layer2->zone, zone);
        }
        if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE)) {
                hammer_lock_ex(&hmp->blkmap_lock);
                if (layer2->entry_crc != crc32(layer2, HAMMER_LAYER2_CRCSIZE))
                        panic("CRC FAILED: LAYER2");
                hammer_unlock(&hmp->blkmap_lock);
        }

failed:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        (long long)zone_offset, (long long)result_offset);
        }
        return(result_offset);
}

/*
 * Check space availability
 */
int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
        const int in_size = sizeof(struct hammer_inode_data) +
                            sizeof(union hammer_btree_elm);
        const int rec_size = (sizeof(union hammer_btree_elm) * 2);
        int64_t usedbytes;

        usedbytes = hmp->rsv_inodes * in_size +
                    hmp->rsv_recs * rec_size +
                    hmp->rsv_databytes +
                    ((int64_t)hmp->rsv_fromdelay << HAMMER_LARGEBLOCK_BITS) +
                    ((int64_t)hidirtybufspace << 2) +
                    (slop << HAMMER_LARGEBLOCK_BITS);

        hammer_count_extra_space_used = usedbytes;      /* debugging */

        if (hmp->copy_stat_freebigblocks >=
            (usedbytes >> HAMMER_LARGEBLOCK_BITS)) {
                return(0);
        }
        return (ENOSPC);
}
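
/*
 * (To illustrate the arithmetic above: rsv_fromdelay and slop are
 * counts of big-blocks, so they are shifted left by
 * HAMMER_LARGEBLOCK_BITS to become byte estimates; the final compare
 * shifts the byte total right by the same amount to express the
 * requirement in big-blocks against copy_stat_freebigblocks.)
 */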