/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/blockmap.c,v 1.2 2008/06/17 04:03:38 dillon Exp $
 */

#include "hammer_util.h"

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 * We are bootstrapping the freemap itself and cannot update it yet.
 */
hammer_off_t
bootstrap_bigblock(struct volume_info *volume)
{
        hammer_off_t result_offset;

        assert_volume_offset(volume);
        result_offset = volume->vol_free_off;

        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        return(result_offset);
}
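
/*
 * Usage sketch (hypothetical caller, not part of this file): during
 * initial formatting, consecutive big-blocks can be reserved by calling
 * the bootstrap allocator repeatedly:
 *
 *      hammer_off_t freemap_off = bootstrap_bigblock(root_vol);
 *      hammer_off_t undomap_off = bootstrap_bigblock(root_vol);
 *
 * Each call returns the volume's current vol_free_off and advances it
 * by one big-block (HAMMER_BIGBLOCK_SIZE); the freemap itself is
 * brought up to date later, once it can be written.
 */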

/*
 * Allocate a big-block for zone-3 for UNDO/REDO FIFO.
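 *
 * Unlike bootstrap_bigblock(), the freemap can be updated here: the
 * layer1/layer2 entries and vol0_stat_freebigblocks are adjusted to
 * record the allocation.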
 */
hammer_off_t
alloc_undo_bigblock(struct volume_info *volume)
{
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;

        /* Only root volume needs formatting */
        assert(volume->vol_no == HAMMER_ROOT_VOLNO);

        result_offset = bootstrap_bigblock(volume);
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
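        /*
         * Note: each layer1 entry points at a layer2 big-block covering
         * 4TB of address space (512K layer2 entries, one per 8MB
         * big-block).
         */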
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        --layer1->blocks_free;
        hammer_crc_set_layer1(HammerVersion, layer1);
        buffer1->cache.modified = 1;

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
        assert(layer2->zone == 0);
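        /*
         * Claim the big-block for the UNDO zone and mark it fully
         * consumed (append_off at the end, no bytes free); the UNDO
         * FIFO manages this space on its own.
         */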
        layer2->zone = HAMMER_ZONE_UNDO_INDEX;
        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
        layer2->bytes_free = 0;
        hammer_crc_set_layer2(HammerVersion, layer2);
        buffer2->cache.modified = 1;

        --volume->ondisk->vol0_stat_freebigblocks;

        rel_buffer(buffer1);
        rel_buffer(buffer2);

        return(result_offset);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        hammer_off_t tmp_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        void *ptr;

        volume = get_root_volume();

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
        bytes = HAMMER_DATA_DOALIGN(bytes);
        assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);  /* not HAMMER_XBUFSIZE */
        assert(hammer_is_index_record(zone));

again:
        assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));
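
        /*
         * If the first and last byte of the proposed allocation fall in
         * different HAMMER_BUFSIZE buffers, the XOR below leaves bits
         * set above the buffer mask and next_offset is rounded up to
         * the next buffer boundary (tmp_offset's buffer base).
         */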
        tmp_offset = blockmap->next_offset + bytes - 1;
        if ((blockmap->next_offset ^ tmp_offset) & ~HAMMER_BUFMASK64)
                blockmap->next_offset = tmp_offset & ~HAMMER_BUFMASK64;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
        assert(!((blockmap->next_offset & HAMMER_BIGBLOCK_MASK) == 0 &&
                layer1->blocks_free == 0));

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX)
                errx(1, "alloc_blockmap: layer2 ran out of space!");

        /*
         * If we are entering a new big-block assign ownership to our
         * zone.  If the big-block is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                hammer_crc_set_layer1(HammerVersion, layer1);
                layer2->zone = zone;
                --volume->ondisk->vol0_stat_freebigblocks;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset =
                        HAMMER_ZONE_LAYER2_NEXT_OFFSET(blockmap->next_offset);
                goto again;
        }

        assert(layer2->append_off ==
                (blockmap->next_offset & HAMMER_BIGBLOCK_MASK));
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset & HAMMER_BIGBLOCK_MASK;
        hammer_crc_set_layer2(HammerVersion, layer2);

        ptr = get_buffer_data(*result_offp, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        return(ptr);
}
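
/*
 * Usage sketch (hypothetical, not part of this file): formatting code
 * could carve an on-disk B-Tree node out of the B-Tree zone like so:
 *
 *      struct buffer_info *bp = NULL;
 *      hammer_off_t node_off;
 *      hammer_node_ondisk_t node;
 *
 *      node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
 *                            &node_off, &bp);
 *      ...initialize *node...
 *      rel_buffer(bp);
 *
 * The returned pointer references the allocation in the buffer cache
 * and node_off receives its zone-encoded blockmap offset.
 */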
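
/*
 * Convenience wrapper: translate a zone-X offset to its zone-2 (raw
 * buffer) equivalent without copying out the layer1/layer2 entries.
 */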
hammer_off_t
blockmap_lookup(hammer_off_t zone_offset, int *errorp)
{
        return(blockmap_lookup_save(zone_offset, NULL, NULL, errorp));
}
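
/*
 * Translate zone_offset to a zone-2 (or volume-relative) offset and
 * validate it against the freemap.  If save_layer1/save_layer2 are
 * non-NULL the corresponding freemap entries are copied out.  On
 * failure *errorp is set to a negative code:
 *
 *      -1      zone is raw-volume or below (not blockmap translatable)
 *      -2      zone is out of range
 *      -3      UNDO offset lies beyond the undo map's alloc_offset
 *      -4      volume header blockmap does not match the zone
 *      -5      layer1 buffer could not be read
 *      -6      layer1 entry is marked unavailable
 *      -7      layer2 buffer could not be read
 *      -8      big-block is not assigned to the requested zone
 */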
hammer_off_t
blockmap_lookup_save(hammer_off_t zone_offset,
                hammer_blockmap_layer1_t save_layer1,
                hammer_blockmap_layer2_t save_layer2,
                int *errorp)
{
        struct volume_info *root_volume = NULL;
        hammer_volume_ondisk_t ondisk;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        hammer_blockmap_layer1_t layer1;
        hammer_blockmap_layer2_t layer2;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset = HAMMER_OFF_BAD;
        int zone;
        int error = 0;

        if (save_layer1)
                bzero(save_layer1, sizeof(*save_layer1));
        if (save_layer2)
                bzero(save_layer2, sizeof(*save_layer2));

        zone = HAMMER_ZONE_DECODE(zone_offset);

        if (zone <= HAMMER_ZONE_RAW_VOLUME_INDEX) {
                error = -1;
                goto done;
        }
        if (zone >= HAMMER_MAX_ZONES) {
                error = -2;
                goto done;
        }

        root_volume = get_root_volume();
        ondisk = root_volume->ondisk;
        blockmap = &ondisk->vol0_blockmap[zone];

        /*
         * Handle blockmap offset translations.
         */
        if (hammer_is_index_record(zone)) {
                result_offset = hammer_xlate_to_zone2(zone_offset);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                if (zone_offset >= blockmap->alloc_offset) {
                        error = -3;
                        goto done;
                }
                result_offset = hammer_xlate_to_undo(ondisk, zone_offset);
        } else {
                /* assert(zone == HAMMER_ZONE_RAW_BUFFER_INDEX); */
                result_offset = zone_offset;
        }

        /*
         * The blockmap should match the requested zone (else the volume
         * header is mashed).
         */
        if (hammer_is_index_record(zone) &&
            HAMMER_ZONE_DECODE(blockmap->alloc_offset) != zone) {
                error = -4;
                goto done;
        }

        /*
         * Validate that the big-block is assigned to the zone.  Also
         * assign save_layer{1,2} if not NULL.
         */
        freemap = &ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

        if (layer1 == NULL) {
                error = -5;
                goto done;
        }
        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                error = -6;
                goto done;
        }
        if (save_layer1)
                *save_layer1 = *layer1;

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2 == NULL) {
                error = -7;
                goto done;
        }
        if (layer2->zone != zone) {
                error = -8;
                goto done;
        }
        if (save_layer2)
                *save_layer2 = *layer2;

done:
        rel_buffer(buffer1);
        rel_buffer(buffer2);

        if (errorp)
                *errorp = error;

        return(result_offset);
}
330