/* sbin/hammer: Minor fix for hammer(8) manpage */
/* [dragonfly.git] sbin/hammer/blockmap.c */
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sbin/hammer/blockmap.c,v 1.2 2008/06/17 04:03:38 dillon Exp $
35  */
36
37 #include "hammer.h"
38
39 /*
40  * Allocate big-blocks using our poor-man's volume->vol_free_off.
41  *
42  * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
43  * itself and cannot update it yet.
44  */
45 hammer_off_t
46 alloc_bigblock(struct volume_info *volume, int zone)
47 {
48         struct volume_info *root_vol;
49         hammer_blockmap_t freemap;
50         struct buffer_info *buffer1 = NULL;
51         struct buffer_info *buffer2 = NULL;
52         struct hammer_blockmap_layer1 *layer1;
53         struct hammer_blockmap_layer2 *layer2;
54         hammer_off_t layer1_offset;
55         hammer_off_t layer2_offset;
56         hammer_off_t result_offset;
57
58         if (volume == NULL)
59                 volume = get_volume(RootVolNo);
60
61         result_offset = volume->vol_free_off;
62         if (result_offset >= volume->vol_free_end)
63                 errx(1, "alloc_bigblock: Ran out of room, filesystem too small");
64
65         volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;
66
67         /*
68          * Update the freemap if not zone4.
69          */
70         if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
71                 root_vol = get_volume(RootVolNo);
72                 freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
73
74                 layer1_offset = freemap->phys_offset +
75                                 HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
76                 layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
77                 assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
78                 --layer1->blocks_free;
79                 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
80                 buffer1->cache.modified = 1;
81
82                 layer2_offset = layer1->phys_offset +
83                                 HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
84                 layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
85                 assert(layer2->zone == 0);
86                 layer2->zone = zone;
87                 layer2->append_off = HAMMER_BIGBLOCK_SIZE;
88                 layer2->bytes_free = 0;
89                 layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
90                 buffer2->cache.modified = 1;
91
92                 --root_vol->ondisk->vol0_stat_freebigblocks;
93                 root_vol->cache.modified = 1;
94
95                 rel_buffer(buffer1);
96                 rel_buffer(buffer2);
97                 rel_volume(root_vol);
98         }
99
100         rel_volume(volume);
101         return(result_offset);
102 }
103
104 /*
105  * Allocate a chunk of data out of a blockmap.  This is a simplified
106  * version which uses next_offset as a simple allocation iterator.
107  */
108 void *
109 alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
110                struct buffer_info **bufferp)
111 {
112         struct volume_info *volume;
113         hammer_blockmap_t blockmap;
114         hammer_blockmap_t freemap;
115         struct buffer_info *buffer1 = NULL;
116         struct buffer_info *buffer2 = NULL;
117         struct hammer_blockmap_layer1 *layer1;
118         struct hammer_blockmap_layer2 *layer2;
119         hammer_off_t layer1_offset;
120         hammer_off_t layer2_offset;
121         hammer_off_t chunk_offset;
122         void *ptr;
123
124         volume = get_volume(RootVolNo);
125
126         blockmap = &volume->ondisk->vol0_blockmap[zone];
127         freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
128         assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);
129
130         /*
131          * Alignment and buffer-boundary issues.  If the allocation would
132          * cross a buffer boundary we have to skip to the next buffer.
133          */
134         bytes = (bytes + 15) & ~15;
135         assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);  /* not HAMMER_XBUFSIZE */
136         assert(hammer_is_zone2_mapped_index(zone));
137
138 again:
139         assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));
140
141         if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
142             ~HAMMER_BUFMASK64) {
143                 volume->cache.modified = 1;
144                 blockmap->next_offset = (blockmap->next_offset + bytes - 1) &
145                                         ~HAMMER_BUFMASK64;
146         }
147         chunk_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
148
149         /*
150          * Dive layer 1.
151          */
152         layer1_offset = freemap->phys_offset +
153                         HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
154         layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
155         assert(!(chunk_offset == 0 && layer1->blocks_free == 0));
156
157         if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
158                 fprintf(stderr, "alloc_blockmap: ran out of space!\n");
159                 exit(1);
160         }
161
162         /*
163          * Dive layer 2, each entry represents a big-block.
164          */
165         layer2_offset = layer1->phys_offset +
166                         HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
167         layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
168
169         if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
170                 fprintf(stderr, "alloc_blockmap: ran out of space!\n");
171                 exit(1);
172         }
173
174         /*
175          * If we are entering a new big-block assign ownership to our
176          * zone.  If the big-block is owned by another zone skip it.
177          */
178         if (layer2->zone == 0) {
179                 --layer1->blocks_free;
180                 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
181                 layer2->zone = zone;
182                 --volume->ondisk->vol0_stat_freebigblocks;
183                 assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
184                 assert(layer2->append_off == 0);
185         }
186         if (layer2->zone != zone) {
187                 volume->cache.modified = 1;
188                 blockmap->next_offset = (blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
189                                         ~HAMMER_BIGBLOCK_MASK64;
190                 goto again;
191         }
192
193         assert(layer2->append_off == chunk_offset);
194         layer2->bytes_free -= bytes;
195         *result_offp = blockmap->next_offset;
196         blockmap->next_offset += bytes;
197         layer2->append_off = (int)blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
198         layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
199
200         ptr = get_buffer_data(*result_offp, bufferp, 0);
201         (*bufferp)->cache.modified = 1;
202
203         buffer1->cache.modified = 1;
204         buffer2->cache.modified = 1;
205         volume->cache.modified = 1;
206
207         rel_buffer(buffer1);
208         rel_buffer(buffer2);
209         rel_volume(volume);
210         return(ptr);
211 }
212
213 hammer_off_t
214 blockmap_lookup(hammer_off_t zone_offset,
215                 struct hammer_blockmap_layer1 *save_layer1,
216                 struct hammer_blockmap_layer2 *save_layer2,
217                 int *errorp)
218 {
219         struct volume_info *root_volume = NULL;
220         hammer_blockmap_t blockmap;
221         hammer_blockmap_t freemap;
222         struct hammer_blockmap_layer1 *layer1;
223         struct hammer_blockmap_layer2 *layer2;
224         struct buffer_info *buffer1 = NULL;
225         struct buffer_info *buffer2 = NULL;
226         hammer_off_t layer1_offset;
227         hammer_off_t layer2_offset;
228         hammer_off_t result_offset;
229         int zone;
230         int i;
231         int error = 0;
232
233         if (save_layer1)
234                 bzero(save_layer1, sizeof(*save_layer1));
235         if (save_layer2)
236                 bzero(save_layer2, sizeof(*save_layer2));
237
238         zone = HAMMER_ZONE_DECODE(zone_offset);
239
240         if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX)
241                 error = -1;
242         if (zone >= HAMMER_MAX_ZONES)
243                 error = -2;
244         if (RootVolNo < 0)
245                 error = -3;
246         if (error) {
247                 result_offset = HAMMER_OFF_BAD;
248                 goto done;
249         }
250
251         root_volume = get_volume(RootVolNo);
252         blockmap = &root_volume->ondisk->vol0_blockmap[zone];
253
254         if (zone == HAMMER_ZONE_RAW_BUFFER_INDEX) {
255                 result_offset = zone_offset;
256         } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
257                 i = (zone_offset & HAMMER_OFF_SHORT_MASK) /
258                     HAMMER_BIGBLOCK_SIZE;
259                 if (zone_offset >= blockmap->alloc_offset) {
260                         error = -4;
261                         result_offset = HAMMER_OFF_BAD;
262                         goto done;
263                 }
264                 result_offset = root_volume->ondisk->vol0_undo_array[i] +
265                                 (zone_offset & HAMMER_BIGBLOCK_MASK64);
266         } else {
267                 result_offset = hammer_xlate_to_zone2(zone_offset);
268         }
269
270         /*
271          * The blockmap should match the requested zone (else the volume
272          * header is mashed).
273          *
274          * Note that a valid offset can still be returned if AssertOnFailure
275          * is zero.
276          */
277         if (HAMMER_ZONE_FREEMAP_INDEX != zone &&
278             HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
279                 error = -5;
280                 goto done;
281         }
282
283         /*
284          * Validate that the big-block is assigned to the zone.  Also
285          * assign save_layer{1,2}.
286          */
287
288         freemap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
289         /*
290          * Dive layer 1.
291          */
292         layer1_offset = freemap->phys_offset +
293                         HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
294         layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
295         if (layer1 == NULL) {
296                 error = -6;
297                 goto done;
298         }
299         if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
300                 error = -7;
301                 goto done;
302         }
303
304         if (save_layer1)
305                 *save_layer1 = *layer1;
306
307         /*
308          * Dive layer 2, each entry represents a big-block.
309          */
310         layer2_offset = layer1->phys_offset +
311                         HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
312         layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
313
314         if (layer2 == NULL) {
315                 error = -8;
316                 goto done;
317         }
318         if (layer2->zone != zone) {
319                 error = -9;
320                 goto done;
321         }
322         if (save_layer2)
323                 *save_layer2 = *layer2;
324
325 done:
326         rel_buffer(buffer1);
327         rel_buffer(buffer2);
328         rel_volume(root_volume);
329
330         if (AssertOnFailure && error != 0)
331                 errx(1, "blockmap_lookup: error=%d\n", error);
332         if (errorp)
333                 *errorp = error;
334
335         return(result_offset);
336 }
337