 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.3 2008/02/20 00:55:51 dillon Exp $
43 * Allocate bytes from a zone
46 hammer_blockmap_alloc(hammer_mount_t hmp, int zone, int bytes, int *errorp)
48 hammer_volume_t root_volume;
49 hammer_blockmap_t rootmap;
50 struct hammer_blockmap_layer1 *layer1;
51 struct hammer_blockmap_layer2 *layer2;
52 hammer_buffer_t buffer = NULL;
53 hammer_off_t tmp_offset;
54 hammer_off_t alloc_offset;
55 hammer_off_t layer1_offset;
56 hammer_off_t layer2_offset;
57 hammer_off_t bigblock_offset;
59 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
60 root_volume = hammer_get_root_volume(hmp, errorp);
63 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
64 KKASSERT(rootmap->phys_offset != 0);
65 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
66 HAMMER_ZONE_RAW_BUFFER_INDEX);
67 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
68 KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);
71 * Deal with alignment and buffer-boundary issues.
73 * Be careful, certain primary alignments are used below to allocate
74 * new blockmap blocks.
76 bytes = (bytes + 7) & ~7;
77 KKASSERT(bytes <= HAMMER_BUFSIZE);
78 KKASSERT(rootmap->next_offset <= rootmap->alloc_offset);
80 lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
81 alloc_offset = rootmap->next_offset;
82 tmp_offset = alloc_offset + bytes;
83 if ((alloc_offset ^ (tmp_offset - 1)) & ~HAMMER_BUFMASK64) {
84 alloc_offset = (tmp_offset - 1) & ~HAMMER_BUFMASK64;
88 * Dive layer 1. If we are starting a new layer 1 entry,
89 * allocate a layer 2 block for it.
91 layer1_offset = rootmap->phys_offset +
92 HAMMER_BLOCKMAP_LAYER1_OFFSET(alloc_offset);
93 layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
94 KKASSERT(*errorp == 0);
97 * Allocate layer2 backing store if necessary
99 if ((alloc_offset == rootmap->alloc_offset &&
100 (alloc_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) ||
101 layer1->phys_offset == HAMMER_BLOCKMAP_FREE
103 hammer_modify_buffer(buffer, layer1, sizeof(*layer1));
104 bzero(layer1, sizeof(*layer1));
105 layer1->phys_offset = hammer_freemap_alloc(hmp, alloc_offset,
107 KKASSERT(*errorp == 0);
109 KKASSERT(layer1->phys_offset);
112 * Dive layer 2, each entry represents a large-block. If we are at
113 * the start of a new entry, allocate a large-block.
115 layer2_offset = layer1->phys_offset +
116 HAMMER_BLOCKMAP_LAYER2_OFFSET(alloc_offset);
117 layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
118 KKASSERT(*errorp == 0);
121 * Allocate the bigblock in layer2 if necesasry.
123 if ((alloc_offset == rootmap->alloc_offset &&
124 (alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) ||
125 layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE
127 hammer_modify_buffer(buffer, layer2, sizeof(*layer2));
128 /* XXX rootmap changed */
129 bzero(layer2, sizeof(*layer2));
130 layer2->u.phys_offset = hammer_freemap_alloc(hmp, alloc_offset,
132 layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
133 KKASSERT(*errorp == 0);
136 hammer_modify_buffer(buffer, layer2, sizeof(*layer2));
137 layer2->bytes_free -= bytes;
140 * Calling bnew on the buffer backing the allocation gets it into
141 * the system without a disk read.
143 * XXX This can only be done when appending into a new buffer.
145 if (alloc_offset == rootmap->alloc_offset &&
146 (alloc_offset & HAMMER_BUFMASK) == 0) {
147 bigblock_offset = layer2->u.phys_offset +
148 (alloc_offset & HAMMER_LARGEBLOCK_MASK64);
149 hammer_bnew(hmp, bigblock_offset, errorp, &buffer);
153 * Adjust our iterator
155 hammer_modify_volume(root_volume, rootmap, sizeof(*rootmap));
156 rootmap->next_offset = alloc_offset + bytes;
157 if (rootmap->alloc_offset < rootmap->next_offset)
158 rootmap->alloc_offset = rootmap->next_offset;
161 hammer_rel_buffer(buffer, 0);
162 hammer_rel_volume(root_volume, 0);
163 lockmgr(&hmp->blockmap_lock, LK_RELEASE);
164 return(alloc_offset);
168 * Free (offset,bytes) in a zone
171 hammer_blockmap_free(hammer_mount_t hmp, hammer_off_t bmap_off, int bytes)
173 hammer_volume_t root_volume;
174 hammer_blockmap_t rootmap;
175 struct hammer_blockmap_layer1 *layer1;
176 struct hammer_blockmap_layer2 *layer2;
177 hammer_buffer_t buffer = NULL;
178 hammer_off_t layer1_offset;
179 hammer_off_t layer2_offset;
183 bytes = (bytes + 7) & ~7;
184 KKASSERT(bytes <= HAMMER_BUFSIZE);
185 zone = HAMMER_ZONE_DECODE(bmap_off);
186 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
187 root_volume = hammer_get_root_volume(hmp, &error);
190 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
191 KKASSERT(rootmap->phys_offset != 0);
192 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
193 HAMMER_ZONE_RAW_BUFFER_INDEX);
194 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
195 KKASSERT(((bmap_off ^ (bmap_off + (bytes - 1))) &
196 ~HAMMER_LARGEBLOCK_MASK64) == 0);
198 if (bmap_off >= rootmap->alloc_offset) {
199 panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
200 bmap_off, rootmap->alloc_offset);
207 layer1_offset = rootmap->phys_offset +
208 HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
209 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer);
210 KKASSERT(error == 0);
211 KKASSERT(layer1->phys_offset);
214 * Dive layer 2, each entry represents a large-block.
216 layer2_offset = layer1->phys_offset +
217 HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
218 layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer);
220 KKASSERT(error == 0);
221 KKASSERT(layer2->u.phys_offset);
222 hammer_modify_buffer(buffer, layer2, sizeof(*layer2));
223 layer2->bytes_free += bytes;
226 * If the big-block is free, return it to the free pool. If our
227 * iterator is in the wholely free block, leave the block intact
228 * and reset the iterator.
230 if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
231 if ((rootmap->next_offset ^ bmap_off) &
232 ~HAMMER_LARGEBLOCK_MASK64) {
233 hammer_freemap_free(hmp, layer2->u.phys_offset,
235 layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;
237 hammer_modify_volume(root_volume, rootmap,
239 rootmap->next_offset &= ~HAMMER_LARGEBLOCK_MASK64;
244 hammer_rel_buffer(buffer, 0);
245 hammer_rel_volume(root_volume, 0);
249 * Lookup a blockmap offset.
252 hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
254 hammer_volume_t root_volume;
255 hammer_blockmap_t rootmap;
256 struct hammer_blockmap_layer1 *layer1;
257 struct hammer_blockmap_layer2 *layer2;
258 hammer_buffer_t buffer = NULL;
259 hammer_off_t layer1_offset;
260 hammer_off_t layer2_offset;
261 hammer_off_t result_offset;
264 zone = HAMMER_ZONE_DECODE(bmap_off);
265 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
266 root_volume = hammer_get_root_volume(hmp, errorp);
269 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
270 KKASSERT(rootmap->phys_offset != 0);
271 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
272 HAMMER_ZONE_RAW_BUFFER_INDEX);
273 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
275 if (bmap_off >= rootmap->alloc_offset) {
276 panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
277 bmap_off, rootmap->alloc_offset);
285 layer1_offset = rootmap->phys_offset +
286 HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
287 layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
288 KKASSERT(*errorp == 0);
289 KKASSERT(layer1->phys_offset);
292 * Dive layer 2, each entry represents a large-block.
294 layer2_offset = layer1->phys_offset +
295 HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
296 layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);
298 KKASSERT(*errorp == 0);
299 KKASSERT(layer2->u.phys_offset);
301 result_offset = layer2->u.phys_offset +
302 (bmap_off & HAMMER_LARGEBLOCK_MASK64);
305 hammer_rel_buffer(buffer, 0);
306 hammer_rel_volume(root_volume, 0);
307 if (hammer_debug_general & 0x0800) {
308 kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
309 bmap_off, result_offset);
311 return(result_offset);