/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.1 2008/02/10 09:51:01 dillon Exp $
 */
43 * Allocate bytes from a zone
46 hammer_blockmap_alloc(hammer_mount_t hmp, int zone, int bytes, int *errorp)
48 hammer_volume_t root_volume;
49 hammer_blockmap_entry_t rootmap;
50 hammer_blockmap_entry_t blockmap;
51 hammer_buffer_t buffer = NULL;
52 hammer_off_t alloc_offset;
53 hammer_off_t result_offset;
56 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
57 root_volume = hammer_get_root_volume(hmp, errorp);
60 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
61 KKASSERT(rootmap->phys_offset != 0);
62 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
63 HAMMER_ZONE_RAW_BUFFER_INDEX);
64 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
67 * Deal with alignment and buffer-boundary issues.
69 * Be careful, certain primary alignments are used below to allocate
70 * new blockmap blocks.
72 bytes = (bytes + 7) & ~7;
73 KKASSERT(bytes <= HAMMER_BUFSIZE);
75 lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
76 alloc_offset = rootmap->alloc_offset;
77 result_offset = alloc_offset + bytes;
78 if ((alloc_offset ^ (result_offset - 1)) & ~HAMMER_BUFMASK64) {
79 alloc_offset = (result_offset - 1) & ~HAMMER_BUFMASK64;
83 * Dive layer 2, each entry is a layer-1 entry. If we are at the
84 * start of a new entry, allocate a layer 1 large-block
86 i = (alloc_offset >> (HAMMER_LARGEBLOCK_BITS +
87 HAMMER_BLOCKMAP_BITS)) & HAMMER_BLOCKMAP_RADIX_MASK;
89 blockmap = hammer_bread(hmp, rootmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
90 KKASSERT(*errorp == 0);
92 if ((alloc_offset & HAMMER_LARGEBLOCK_LAYER1_MASK) == 0) {
93 hammer_modify_buffer(buffer, blockmap, sizeof(*blockmap));
94 bzero(blockmap, sizeof(*blockmap));
95 blockmap->phys_offset = hammer_freemap_alloc(hmp, errorp);
96 KKASSERT(*errorp == 0);
97 kprintf("ALLOC LAYER2 %016llx\n", blockmap->phys_offset);
100 kprintf("blkmap_alloc %016llx [%2d@%016llx]", alloc_offset, i, blockmap->phys_offset);
102 KKASSERT(blockmap->phys_offset);
105 * Dive layer 1, each entry is a large-block. If we are at the
106 * start of a new entry, allocate a large-block.
108 i = (alloc_offset >> HAMMER_LARGEBLOCK_BITS) &
109 HAMMER_BLOCKMAP_RADIX_MASK;
111 blockmap = hammer_bread(hmp, blockmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
112 KKASSERT(*errorp == 0);
114 if ((alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
115 hammer_modify_buffer(buffer, blockmap, sizeof(*blockmap));
116 /* XXX rootmap changed */
117 bzero(blockmap, sizeof(*blockmap));
118 blockmap->phys_offset = hammer_freemap_alloc(hmp, errorp);
119 blockmap->bytes_free = HAMMER_LARGEBLOCK_SIZE;
120 KKASSERT(*errorp == 0);
121 kprintf("ALLOC LAYER1 %016llx\n", blockmap->phys_offset);
124 hammer_modify_buffer(buffer, blockmap, sizeof(*blockmap));
125 blockmap->bytes_free -= bytes;
127 kprintf("[%2d@%016llx] free=%d phys %016llx\n", i, blockmap->phys_offset, blockmap->bytes_free, blockmap->phys_offset + (result_offset & HAMMER_LARGEBLOCK_MASK64));
130 hammer_modify_volume(root_volume, &rootmap->alloc_offset,
131 sizeof(rootmap->alloc_offset));
132 result_offset = alloc_offset;
133 rootmap->alloc_offset = alloc_offset + bytes;
136 * Calling bnew on the buffer backing the allocation gets it into
137 * the system without a disk read.
139 * XXX This can only be done when appending into a new buffer.
141 if (((int32_t)result_offset & HAMMER_BUFMASK) == 0) {
142 hammer_bnew(hmp, blockmap->phys_offset + (result_offset & HAMMER_LARGEBLOCK_MASK64), errorp, &buffer);
146 hammer_rel_buffer(buffer, 0);
147 hammer_rel_volume(root_volume, 0);
148 lockmgr(&hmp->blockmap_lock, LK_RELEASE);
149 return(result_offset);
153 * Free (offset,bytes) in a zone
156 hammer_blockmap_free(hammer_mount_t hmp, hammer_off_t bmap_off, int bytes)
158 kprintf("hammer_blockmap_free %016llx %d\n", bmap_off, bytes);
163 * Lookup a blockmap offset.
166 hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
168 hammer_volume_t root_volume;
169 hammer_blockmap_entry_t rootmap;
170 hammer_blockmap_entry_t blockmap;
171 hammer_buffer_t buffer = NULL;
172 hammer_off_t result_offset;
176 zone = HAMMER_ZONE_DECODE(bmap_off);
177 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
178 root_volume = hammer_get_root_volume(hmp, errorp);
181 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
182 KKASSERT(rootmap->phys_offset != 0);
183 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
184 HAMMER_ZONE_RAW_BUFFER_INDEX);
185 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
187 if (bmap_off >= rootmap->alloc_offset) {
188 panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
189 bmap_off, rootmap->alloc_offset);
195 * Dive layer 2, each entry is a layer-1 entry. If we are at the
196 * start of a new entry, allocate a layer 1 large-block
198 i = (bmap_off >> (HAMMER_LARGEBLOCK_BITS +
199 HAMMER_BLOCKMAP_BITS)) & HAMMER_BLOCKMAP_RADIX_MASK;
201 blockmap = hammer_bread(hmp, rootmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
202 KKASSERT(*errorp == 0);
203 KKASSERT(blockmap->phys_offset);
206 * Dive layer 1, entry entry is a large-block. If we are at the
207 * start of a new entry, allocate a large-block.
209 i = (bmap_off >> HAMMER_LARGEBLOCK_BITS) & HAMMER_BLOCKMAP_RADIX_MASK;
211 blockmap = hammer_bread(hmp, blockmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
212 KKASSERT(*errorp == 0);
213 KKASSERT(blockmap->phys_offset);
214 result_offset = blockmap->phys_offset +
215 (bmap_off & HAMMER_LARGEBLOCK_MASK64);
218 hammer_rel_buffer(buffer, 0);
219 hammer_rel_volume(root_volume, 0);
220 if (hammer_debug_general & 0x0800) {
221 kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
222 bmap_off, result_offset);
224 return(result_offset);