/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.4 2008/02/23 03:01:08 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
#include "hammer.h"

/*
 * Allocate bytes from a zone.
 *
 * Each zone's blockmap is a two-layer translation map: layer1 entries
 * point at layer2 arrays, and each layer2 entry maps one large-block of
 * the zone onto backing store allocated from the freemap.  Allocations
 * are appended at the zone's next_offset and may not cross a buffer
 * boundary.
 */
hammer_off_t
hammer_blockmap_alloc(hammer_mount_t hmp, int zone, int bytes, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        int loops = 0;

        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
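        /*
         * Locate the zone's blockmap in the root volume and sanity check
         * it.  The blockmap must be backed by the raw buffer zone and its
         * allocation and iteration offsets must be encoded for the zone
         * we are allocating from.
         */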
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful: certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes <= HAMMER_BUFSIZE);
        KKASSERT(rootmap->next_offset <= rootmap->alloc_offset);

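        /*
         * The zone's blockmap is shared by all allocations in the mount.
         * Hold the blockmap lock exclusively across the layer lookups and
         * the iterator update.
         */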
        lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
        next_offset = rootmap->next_offset;

again:
        /*
         * The allocation request may not cross a buffer boundary.  If it
         * would, push next_offset to the start of the next buffer.  The
         * XOR exposes any bits above the buffer mask which differ between
         * the first and last byte of the request.
         */
        tmp_offset = next_offset + bytes;
        if ((next_offset ^ (tmp_offset - 1)) & ~HAMMER_BUFMASK64)
                next_offset = (tmp_offset - 1) & ~HAMMER_BUFMASK64;

        /*
         * Dive layer 1.  If we are starting a new layer 1 entry,
         * allocate a layer 2 block for it.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        KKASSERT(*errorp == 0);

        /*
         * Allocate layer2 backing store in layer1 if necessary, either
         * because we are diving into virgin territory at the start of a
         * new layer2 range or because the layer1 entry was previously
         * freed.
         */
        if ((next_offset == rootmap->alloc_offset &&
            (next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) ||
            layer1->phys_offset == HAMMER_BLOCKMAP_FREE
        ) {
                hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = hammer_freemap_alloc(hmp, next_offset,
                                                           errorp);
                layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
                KKASSERT(*errorp == 0);
        }
        KKASSERT(layer1->phys_offset);

        /*
         * If layer1 indicates that its layer2 has no free big-blocks, and
         * the zone's alloc_offset does not fall within that layer2's range,
         * skip the entire layer2 range.
         */
        if (layer1->blocks_free == 0 &&
            ((next_offset ^ rootmap->alloc_offset) &
             ~HAMMER_BLOCKMAP_LAYER2_MASK) != 0) {
                kprintf("blockmap skip1 %016llx\n", next_offset);
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2_MASK) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                if (next_offset >= hmp->zone_limits[zone]) {
                        kprintf("blockmap wrap1\n");
                        next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                        if (++loops == 2) {     /* XXX poor-man's */
                                next_offset = 0;
                                *errorp = ENOSPC;
                                goto done;
                        }
                }
                goto again;
        }

        /*
         * Dive layer 2; each entry represents one large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        KKASSERT(*errorp == 0);

        if ((next_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                /*
                 * We are at the beginning of a new bigblock
                 */
                if (next_offset == rootmap->alloc_offset ||
                    layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
                        /*
                         * Allocate the bigblock in layer2 if diving into
                         * uninitialized space or if the block was previously
                         * freed.
                         */
                        hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                        KKASSERT(layer1->blocks_free);
                        --layer1->blocks_free;
                        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
                        bzero(layer2, sizeof(*layer2));
                        layer2->u.phys_offset =
                                hammer_freemap_alloc(hmp, next_offset, errorp);
                        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                        KKASSERT(*errorp == 0);
                } else if (layer2->bytes_free != HAMMER_LARGEBLOCK_SIZE) {
                        /*
                         * We have encountered a block that is already
                         * partially allocated.  We must skip this block.
                         */
                        kprintf("blockmap skip2 %016llx\n", next_offset);
                        next_offset += HAMMER_LARGEBLOCK_SIZE;
                        if (next_offset >= hmp->zone_limits[zone]) {
                                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                                kprintf("blockmap wrap2\n");
                                if (++loops == 2) {     /* XXX poor-man's */
                                        next_offset = 0;
                                        *errorp = ENOSPC;
                                        goto done;
                                }
                        }
                        goto again;
                }
        } else {
                /*
                 * We are appending within a bigblock.
                 */
                KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
        }

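        /*
         * Account for the allocation in the big-block's free space.
         */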
        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free -= bytes;

        /*
         * If the allocation starts at a buffer boundary the buffer is
         * completely free and does not have to be read from disk; call
         * hammer_bnew() to instantiate it directly.
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                bigblock_offset = layer2->u.phys_offset +
                                  (next_offset & HAMMER_LARGEBLOCK_MASK64);
                hammer_bnew(hmp, bigblock_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator
         */
        hammer_modify_volume(root_volume, rootmap, sizeof(*rootmap));
        rootmap->next_offset = next_offset + bytes;
        if (rootmap->alloc_offset < rootmap->next_offset)
                rootmap->alloc_offset = rootmap->next_offset;
done:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        lockmgr(&hmp->blockmap_lock, LK_RELEASE);
        return(next_offset);
}

/*
 * Free (offset, bytes) in a zone.
 *
 * The space is credited back to the big-block containing bmap_off.  A
 * big-block which becomes completely free is returned to the freemap,
 * unless the allocation iterator currently points into it, in which case
 * the big-block is left intact and the iterator is reset to its base.
 */
void
hammer_blockmap_free(hammer_mount_t hmp, hammer_off_t bmap_off, int bytes)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;

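        /*
         * Round to the allocator's 8-byte granularity and validate the
         * zone encoded in the freed blockmap offset.
         */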
        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes <= HAMMER_BUFSIZE);
        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return;

        lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);

        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
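        /*
         * The freed range may not cross a large-block boundary.
         */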
        KKASSERT(((bmap_off ^ (bmap_off + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_free: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        KKASSERT(error == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2; each entry represents one large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);

        KKASSERT(error == 0);
        KKASSERT(layer2->u.phys_offset);
        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free += bytes;

        /*
         * If the big-block is now completely free, return it to the free
         * pool.  If our allocation iterator is inside the wholly free
         * big-block, leave the big-block intact and simply reset the
         * iterator to its base.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                if ((rootmap->next_offset ^ bmap_off) &
                    ~HAMMER_LARGEBLOCK_MASK64) {
                        hammer_freemap_free(hmp, layer2->u.phys_offset,
                                            bmap_off, &error);
                        layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;

                        hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
                        if (layer1->blocks_free == HAMMER_BLOCKMAP_RADIX2) {
                                hammer_freemap_free(
                                        hmp, layer1->phys_offset,
                                        bmap_off & ~HAMMER_BLOCKMAP_LAYER2_MASK,
                                        &error);
                                layer1->phys_offset = HAMMER_BLOCKMAP_FREE;
                        }
                } else {
                        hammer_modify_volume(root_volume, rootmap,
                                             sizeof(*rootmap));
                        rootmap->next_offset &= ~HAMMER_LARGEBLOCK_MASK64;
                }
        }
done:
        lockmgr(&hmp->blockmap_lock, LK_RELEASE);

        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        hammer_rel_volume(root_volume, 0);
}

/*
 * Lookup a blockmap offset, translating a zone-encoded blockmap offset
 * into the underlying raw buffer offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        int zone;

        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                result_offset = 0;
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2; each entry represents one large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        KKASSERT(*errorp == 0);
        KKASSERT(layer2->u.phys_offset);

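        /*
         * Translate the zone-relative offset into a raw buffer offset
         * within the big-block.
         */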
        result_offset = layer2->u.phys_offset +
                        (bmap_off & HAMMER_LARGEBLOCK_MASK64);
done:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        bmap_off, result_offset);
        }
        return(result_offset);
}