/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.2 2008/02/10 18:58:22 dillon Exp $
 */
43 * Allocate bytes from a zone
46 hammer_blockmap_alloc(hammer_mount_t hmp, int zone, int bytes, int *errorp)
48 hammer_volume_t root_volume;
49 hammer_blockmap_entry_t rootmap;
50 hammer_blockmap_entry_t blockmap;
51 hammer_buffer_t buffer = NULL;
52 hammer_off_t alloc_offset;
53 hammer_off_t result_offset;
56 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
57 root_volume = hammer_get_root_volume(hmp, errorp);
60 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
61 KKASSERT(rootmap->phys_offset != 0);
62 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
63 HAMMER_ZONE_RAW_BUFFER_INDEX);
64 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
67 * Deal with alignment and buffer-boundary issues.
69 * Be careful, certain primary alignments are used below to allocate
70 * new blockmap blocks.
72 bytes = (bytes + 7) & ~7;
73 KKASSERT(bytes <= HAMMER_BUFSIZE);
75 lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
76 alloc_offset = rootmap->alloc_offset;
77 result_offset = alloc_offset + bytes;
78 if ((alloc_offset ^ (result_offset - 1)) & ~HAMMER_BUFMASK64) {
79 alloc_offset = (result_offset - 1) & ~HAMMER_BUFMASK64;
83 * Dive layer 2, each entry is a layer-1 entry. If we are at the
84 * start of a new entry, allocate a layer 1 large-block
86 i = (alloc_offset >> (HAMMER_LARGEBLOCK_BITS +
87 HAMMER_BLOCKMAP_BITS)) & HAMMER_BLOCKMAP_RADIX_MASK;
89 blockmap = hammer_bread(hmp, rootmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
90 KKASSERT(*errorp == 0);
92 if ((alloc_offset & HAMMER_LARGEBLOCK_LAYER1_MASK) == 0) {
93 hammer_modify_buffer(buffer, blockmap, sizeof(*blockmap));
94 bzero(blockmap, sizeof(*blockmap));
95 blockmap->phys_offset = hammer_freemap_alloc(hmp, errorp);
96 KKASSERT(*errorp == 0);
98 KKASSERT(blockmap->phys_offset);
101 * Dive layer 1, each entry is a large-block. If we are at the
102 * start of a new entry, allocate a large-block.
104 i = (alloc_offset >> HAMMER_LARGEBLOCK_BITS) &
105 HAMMER_BLOCKMAP_RADIX_MASK;
107 blockmap = hammer_bread(hmp, blockmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
108 KKASSERT(*errorp == 0);
110 if ((alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
111 hammer_modify_buffer(buffer, blockmap, sizeof(*blockmap));
112 /* XXX rootmap changed */
113 bzero(blockmap, sizeof(*blockmap));
114 blockmap->phys_offset = hammer_freemap_alloc(hmp, errorp);
115 blockmap->bytes_free = HAMMER_LARGEBLOCK_SIZE;
116 KKASSERT(*errorp == 0);
119 hammer_modify_buffer(buffer, blockmap, sizeof(*blockmap));
120 blockmap->bytes_free -= bytes;
122 hammer_modify_volume(root_volume, &rootmap->alloc_offset,
123 sizeof(rootmap->alloc_offset));
124 result_offset = alloc_offset;
125 rootmap->alloc_offset = alloc_offset + bytes;
128 * Calling bnew on the buffer backing the allocation gets it into
129 * the system without a disk read.
131 * XXX This can only be done when appending into a new buffer.
133 if (((int32_t)result_offset & HAMMER_BUFMASK) == 0) {
134 hammer_bnew(hmp, blockmap->phys_offset + (result_offset & HAMMER_LARGEBLOCK_MASK64), errorp, &buffer);
138 hammer_rel_buffer(buffer, 0);
139 hammer_rel_volume(root_volume, 0);
140 lockmgr(&hmp->blockmap_lock, LK_RELEASE);
141 return(result_offset);
145 * Free (offset,bytes) in a zone
148 hammer_blockmap_free(hammer_mount_t hmp, hammer_off_t bmap_off, int bytes)
150 kprintf("hammer_blockmap_free %016llx %d\n", bmap_off, bytes);
155 * Lookup a blockmap offset.
158 hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
160 hammer_volume_t root_volume;
161 hammer_blockmap_entry_t rootmap;
162 hammer_blockmap_entry_t blockmap;
163 hammer_buffer_t buffer = NULL;
164 hammer_off_t result_offset;
168 zone = HAMMER_ZONE_DECODE(bmap_off);
169 KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
170 root_volume = hammer_get_root_volume(hmp, errorp);
173 rootmap = &root_volume->ondisk->vol0_blockmap[zone];
174 KKASSERT(rootmap->phys_offset != 0);
175 KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
176 HAMMER_ZONE_RAW_BUFFER_INDEX);
177 KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
179 if (bmap_off >= rootmap->alloc_offset) {
180 panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
181 bmap_off, rootmap->alloc_offset);
187 * Dive layer 2, each entry is a layer-1 entry. If we are at the
188 * start of a new entry, allocate a layer 1 large-block
190 i = (bmap_off >> (HAMMER_LARGEBLOCK_BITS +
191 HAMMER_BLOCKMAP_BITS)) & HAMMER_BLOCKMAP_RADIX_MASK;
193 blockmap = hammer_bread(hmp, rootmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
194 KKASSERT(*errorp == 0);
195 KKASSERT(blockmap->phys_offset);
198 * Dive layer 1, entry entry is a large-block. If we are at the
199 * start of a new entry, allocate a large-block.
201 i = (bmap_off >> HAMMER_LARGEBLOCK_BITS) & HAMMER_BLOCKMAP_RADIX_MASK;
203 blockmap = hammer_bread(hmp, blockmap->phys_offset + i * sizeof(*blockmap), errorp, &buffer);
204 KKASSERT(*errorp == 0);
205 KKASSERT(blockmap->phys_offset);
206 result_offset = blockmap->phys_offset +
207 (bmap_off & HAMMER_LARGEBLOCK_MASK64);
210 hammer_rel_buffer(buffer, 0);
211 hammer_rel_volume(root_volume, 0);
212 if (hammer_debug_general & 0x0800) {
213 kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
214 bmap_off, result_offset);
216 return(result_offset);