HAMMER 30A/many: blockmap cleanup
sys/vfs/hammer/hammer_blockmap.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_blockmap.c,v 1.5 2008/02/23 20:55:23 dillon Exp $
 */

/*
 * HAMMER blockmap
 */
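/*
 * Overview (summarized from the code below): each zone's blockmap is a
 * two-layer radix structure rooted in the volume header's
 * vol0_blockmap[] array.  A zone-encoded offset is translated by
 * indexing a layer1 entry (which points at a layer2 block and counts
 * its free large-blocks), then a layer2 entry (which maps one
 * large-block and tracks its bytes_free), with the remaining low bits
 * supplying the byte offset within that large-block.
 */
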
#include "hammer.h"

/*
 * Allocate bytes from a zone
 */
hammer_off_t
hammer_blockmap_alloc(hammer_mount_t hmp, int zone, int bytes, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_buffer_t buffer3 = NULL;
        hammer_off_t tmp_offset;
        hammer_off_t next_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        int loops = 0;

        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->next_offset) == zone);

        /*
         * Deal with alignment and buffer-boundary issues.
         *
         * Be careful, certain primary alignments are used below to allocate
         * new blockmap blocks.
         */
        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes <= HAMMER_BUFSIZE);

        lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);
        next_offset = rootmap->next_offset;

again:
        /*
         * The allocation request may not cross a buffer boundary
         */
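        /*
         * Illustration (assuming 16KB buffers, i.e. HAMMER_BUFSIZE is
         * 16384): if next_offset is 0x3ff8 and bytes is 16 the request
         * would span [0x3ff8, 0x4007].  The XOR below exposes the
         * differing buffer-index bits, so next_offset is bumped to
         * 0x4000, the base of the next buffer.
         */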
        tmp_offset = next_offset + bytes;
        if ((next_offset ^ (tmp_offset - 1)) & ~HAMMER_BUFMASK64)
                next_offset = (tmp_offset - 1) & ~HAMMER_BUFMASK64;

        /*
         * Dive layer 1.  If we are starting a new layer 1 entry,
         * allocate a layer 2 block for it.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(next_offset);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer1);
        KKASSERT(*errorp == 0);
        KKASSERT(next_offset <= rootmap->alloc_offset);

        /*
         * Allocate layer2 backing store in layer1 if necessary.  next_offset
         * may skip ahead to a big-block boundary, but alloc_offset is always
         * at least big-block aligned, so that is ok.
         */
        if (next_offset == rootmap->alloc_offset &&
            ((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0 ||
            layer1->phys_offset == HAMMER_BLOCKMAP_FREE)
        ) {
                KKASSERT((next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0);
                hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = hammer_freemap_alloc(hmp, next_offset,
                                                           errorp);
                layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
                KKASSERT(*errorp == 0);
        }
        KKASSERT(layer1->phys_offset);

        /*
         * If layer1 indicates no free blocks in layer2 and our alloc_offset
         * is not in layer2, skip layer2 entirely.
         */
        if (layer1->blocks_free == 0 &&
            ((next_offset ^ rootmap->alloc_offset) & ~HAMMER_BLOCKMAP_LAYER2_MASK) != 0) {
                kprintf("blockmap skip1 %016llx\n", next_offset);
                next_offset = (next_offset + HAMMER_BLOCKMAP_LAYER2_MASK) &
                              ~HAMMER_BLOCKMAP_LAYER2_MASK;
                if (next_offset >= hmp->zone_limits[zone]) {
                        kprintf("blockmap wrap1\n");
                        next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                        if (++loops == 2) {     /* XXX poor-man's */
                                next_offset = 0;
                                *errorp = ENOSPC;
                                goto done;
                        }
                }
                goto again;
        }

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(next_offset);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer2);
        KKASSERT(*errorp == 0);

        if ((next_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                /*
                 * We are at the beginning of a new bigblock
                 */
                if (next_offset == rootmap->alloc_offset ||
                    layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
                        /*
                         * Allocate the bigblock in layer2 if diving into
                         * uninitialized space or if the block was previously
                         * freed.
                         */
                        hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                        KKASSERT(layer1->blocks_free);
                        --layer1->blocks_free;
                        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
                        bzero(layer2, sizeof(*layer2));
                        layer2->u.phys_offset =
                                hammer_freemap_alloc(hmp, next_offset, errorp);
                        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                        KKASSERT(*errorp == 0);
                } else if (layer2->bytes_free != HAMMER_LARGEBLOCK_SIZE) {
                        /*
                         * We have encountered a block that is already
                         * partially allocated.  We must skip this block.
                         */
                        kprintf("blockmap skip2 %016llx %d\n",
                                next_offset, layer2->bytes_free);
                        next_offset += HAMMER_LARGEBLOCK_SIZE;
                        if (next_offset >= hmp->zone_limits[zone]) {
                                next_offset = HAMMER_ZONE_ENCODE(zone, 0);
                                kprintf("blockmap wrap2\n");
                                if (++loops == 2) {     /* XXX poor-man's */
                                        next_offset = 0;
                                        *errorp = ENOSPC;
                                        goto done;
                                }
                        }
                        goto again;
                }
        } else {
                /*
                 * We are appending within a bigblock.
                 */
                KKASSERT(layer2->u.phys_offset != HAMMER_BLOCKMAP_FREE);
        }

        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free -= bytes;
        KKASSERT(layer2->bytes_free >= 0);

        /*
         * If the allocation begins at a fresh buffer boundary the buffer
         * is known to be completely free and does not have to be read
         * from disk; call hammer_bnew() to instantiate it directly.
         */
        if ((next_offset & HAMMER_BUFMASK) == 0) {
                bigblock_offset = layer2->u.phys_offset +
                                  (next_offset & HAMMER_LARGEBLOCK_MASK64);
                hammer_bnew(hmp, bigblock_offset, errorp, &buffer3);
        }

        /*
         * Adjust our iterator and alloc_offset.  The layer1 and layer2
         * space beyond alloc_offset is uninitialized.  alloc_offset must
         * be big-block aligned.
         */
        hammer_modify_volume(root_volume, rootmap, sizeof(*rootmap));
        rootmap->next_offset = next_offset + bytes;
        if (rootmap->alloc_offset < rootmap->next_offset) {
                rootmap->alloc_offset =
                    (rootmap->next_offset + HAMMER_LARGEBLOCK_MASK) &
                    ~HAMMER_LARGEBLOCK_MASK64;
        }
done:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        if (buffer3)
                hammer_rel_buffer(buffer3, 0);
        hammer_rel_volume(root_volume, 0);
        lockmgr(&hmp->blockmap_lock, LK_RELEASE);
        return(next_offset);
}
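
/*
 * Example (hypothetical sketch, not part of this file): allocating
 * space for a B-Tree node from the B-Tree zone.  Assumes struct
 * hammer_node_ondisk from hammer_disk.h; error handling abbreviated.
 */
#if 0
static hammer_off_t
example_alloc_btree_node(hammer_mount_t hmp)
{
        hammer_off_t node_offset;
        int error;

        node_offset = hammer_blockmap_alloc(hmp, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            &error);
        if (error)
                return(0);
        return(node_offset);
}
#endif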

/*
 * Free (offset,bytes) in a zone
 */
void
hammer_blockmap_free(hammer_mount_t hmp, hammer_off_t bmap_off, int bytes)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        int error;
        int zone;

        bytes = (bytes + 7) & ~7;
        KKASSERT(bytes <= HAMMER_BUFSIZE);
        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return;

        lockmgr(&hmp->blockmap_lock, LK_EXCLUSIVE|LK_RETRY);

        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);
        KKASSERT(((bmap_off ^ (bmap_off + (bytes - 1))) &
                  ~HAMMER_LARGEBLOCK_MASK64) == 0);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_free: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
        KKASSERT(error == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);

        KKASSERT(error == 0);
        KKASSERT(layer2->u.phys_offset);
        hammer_modify_buffer(buffer2, layer2, sizeof(*layer2));
        layer2->bytes_free += bytes;
        KKASSERT(layer2->bytes_free <= HAMMER_LARGEBLOCK_SIZE);

        /*
         * If the big-block is now completely free, return it to the free
         * pool.  However, if our iterator is currently sitting inside the
         * wholly free block, leave the block intact and simply reset the
         * iterator to its base.
         */
        if (layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                if ((rootmap->next_offset ^ bmap_off) &
                    ~HAMMER_LARGEBLOCK_MASK64) {
                        hammer_freemap_free(hmp, layer2->u.phys_offset,
                                            bmap_off, &error);
                        layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;

                        hammer_modify_buffer(buffer1, layer1, sizeof(*layer1));
                        ++layer1->blocks_free;
#if 0
                        /*
                         * XXX Not working yet - we aren't clearing it when
                         * reallocating the block later on.
                         */
                        if (layer1->blocks_free == HAMMER_BLOCKMAP_RADIX2) {
                                hammer_freemap_free(
                                        hmp, layer1->phys_offset,
                                        bmap_off & ~HAMMER_BLOCKMAP_LAYER2_MASK,
                                        &error);
                                layer1->phys_offset = HAMMER_BLOCKMAP_FREE;
                        }
#endif
                } else {
                        hammer_modify_volume(root_volume, rootmap,
                                             sizeof(*rootmap));
                        rootmap->next_offset &= ~HAMMER_LARGEBLOCK_MASK64;
                }
        }
done:
        lockmgr(&hmp->blockmap_lock, LK_RELEASE);

        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);
        hammer_rel_volume(root_volume, 0);
}
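
/*
 * Example (hypothetical sketch): a matched allocate/release pair.
 * hammer_blockmap_free() asserts that (bmap_off, bytes) does not cross
 * a large-block boundary; any extent returned by hammer_blockmap_alloc()
 * satisfies this because an allocation never even crosses a buffer
 * boundary.  The record-zone constant is an assumption for the example.
 */
#if 0
static void
example_alloc_then_free(hammer_mount_t hmp)
{
        hammer_off_t rec_offset;
        int error;

        rec_offset = hammer_blockmap_alloc(hmp, HAMMER_ZONE_RECORD_INDEX,
                                           128, &error);
        if (error == 0 && rec_offset != 0)
                hammer_blockmap_free(hmp, rec_offset, 128);
}
#endif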

/*
 * Lookup a blockmap offset.
 */
hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off, int *errorp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t result_offset;
        int zone;

        zone = HAMMER_ZONE_DECODE(bmap_off);
        KKASSERT(zone >= HAMMER_ZONE_BTREE_INDEX && zone < HAMMER_MAX_ZONES);
        root_volume = hammer_get_root_volume(hmp, errorp);
        if (*errorp)
                return(0);
        rootmap = &root_volume->ondisk->vol0_blockmap[zone];
        KKASSERT(rootmap->phys_offset != 0);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->phys_offset) ==
                 HAMMER_ZONE_RAW_BUFFER_INDEX);
        KKASSERT(HAMMER_ZONE_DECODE(rootmap->alloc_offset) == zone);

        if (bmap_off >= rootmap->alloc_offset) {
                panic("hammer_blockmap_lookup: %016llx beyond EOF %016llx",
                      bmap_off, rootmap->alloc_offset);
                result_offset = 0;
                goto done;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(bmap_off);
        layer1 = hammer_bread(hmp, layer1_offset, errorp, &buffer);
        KKASSERT(*errorp == 0);
        KKASSERT(layer1->phys_offset);

        /*
         * Dive layer 2, each entry represents a large-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(bmap_off);
        layer2 = hammer_bread(hmp, layer2_offset, errorp, &buffer);

        KKASSERT(*errorp == 0);
        KKASSERT(layer2->u.phys_offset);

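        /*
         * Worked example (assuming 8MB large-blocks, so
         * HAMMER_LARGEBLOCK_MASK64 keeps the low 23 bits): if
         * layer2->u.phys_offset maps the large-block at raw offset X
         * and the low bits of bmap_off are 0x402000, the translated
         * result below is X + 0x402000.
         */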
        result_offset = layer2->u.phys_offset +
                        (bmap_off & HAMMER_LARGEBLOCK_MASK64);
done:
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        hammer_rel_volume(root_volume, 0);
        if (hammer_debug_general & 0x0800) {
                kprintf("hammer_blockmap_lookup: %016llx -> %016llx\n",
                        bmap_off, result_offset);
        }
        return(result_offset);
}
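
/*
 * Example (hypothetical sketch): translating a zone-encoded offset to
 * its backing raw-buffer offset before reading it.  Assumes
 * hammer_bread() accepts the zone-2 offset produced by
 * hammer_blockmap_lookup(), as it does for the layer1/layer2 reads
 * above.
 */
#if 0
static void *
example_read_via_blockmap(hammer_mount_t hmp, hammer_off_t bmap_off,
                          hammer_buffer_t *bufferp)
{
        hammer_off_t raw_offset;
        int error;

        raw_offset = hammer_blockmap_lookup(hmp, bmap_off, &error);
        if (error)
                return(NULL);
        return(hammer_bread(hmp, raw_offset, &error, bufferp));
}
#endif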