HAMMER Utilities: Correct vol0_stat_freebigblocks.
sbin/hammer/ondisk.c
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.21 2008/06/02 16:57:53 dillon Exp $
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume,
			hammer_off_t owner);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
			struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
			int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

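/*
 * Hash a raw buffer offset into a buffer list index.  Cached buffers are
 * tracked per-volume in an array of TAILQs indexed by buffer number.
 */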
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
	int hi;

	hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
	return(hi);
}

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
	struct volume_info *vol;
	struct volume_info *scan;
	struct hammer_volume_ondisk *ondisk;
	int i, n;

	/*
	 * Allocate the volume structure
	 */
	vol = malloc(sizeof(*vol));
	bzero(vol, sizeof(*vol));
	for (i = 0; i < HAMMER_BUFLISTS; ++i)
		TAILQ_INIT(&vol->buffer_lists[i]);
	vol->name = strdup(filename);
	vol->fd = open(filename, oflags);
	if (vol->fd < 0) {
		free(vol->name);
		free(vol);
		err(1, "setup_volume: %s: Open failed", filename);
	}

	/*
	 * Read or initialize the volume header
	 */
	vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
	} else {
		n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
		if (n != HAMMER_BUFSIZE) {
			err(1, "setup_volume: %s: Read failed at offset 0",
			    filename);
		}
		vol_no = ondisk->vol_no;
		if (RootVolNo < 0) {
			RootVolNo = ondisk->vol_rootvol;
		} else if (RootVolNo != (int)ondisk->vol_rootvol) {
			errx(1, "setup_volume: %s: root volume disagreement: "
				"%d vs %d",
				vol->name, RootVolNo, ondisk->vol_rootvol);
		}

		if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
			errx(1, "setup_volume: %s: Header does not indicate "
				"that this is a hammer volume", vol->name);
		}
		if (TAILQ_EMPTY(&VolList)) {
			Hammer_FSId = vol->ondisk->vol_fsid;
		} else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
			errx(1, "setup_volume: %s: FSId does not match other "
				"volumes!", vol->name);
		}
	}
	vol->vol_no = vol_no;

	if (isnew) {
		/*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
		vol->cache.modified = 1;
	}

	/*
	 * Link the volume structure in
	 */
	TAILQ_FOREACH(scan, &VolList, entry) {
		if (scan->vol_no == vol_no) {
			errx(1, "setup_volume %s: Duplicate volume number %d "
				"against %s", filename, vol_no, scan->name);
		}
	}
	TAILQ_INSERT_TAIL(&VolList, vol, entry);
	return(vol);
}

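/*
 * Look up an already-loaded volume by number and add a reference to it.
 * The volume must have been brought in with setup_volume() first.
 */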
struct volume_info *
get_volume(int32_t vol_no)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry) {
		if (vol->vol_no == vol_no)
			break;
	}
	if (vol == NULL)
		errx(1, "get_volume: Volume %d does not exist!", vol_no);
	++vol->cache.refs;
	/* not added to or removed from hammer cache */
	return(vol);
}

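/*
 * Release a reference obtained with get_volume().  The volume structure
 * itself is not freed here.
 */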
void
rel_volume(struct volume_info *volume)
{
	/* not added to or removed from hammer cache */
	--volume->cache.refs;
}

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
	void *ondisk;
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_off_t orig_offset = buf_offset;
	int vol_no;
	int zone;
	int hi, n;

	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
		buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
	}
	assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = get_volume(vol_no);
	buf_offset &= ~HAMMER_BUFMASK64;

	hi = buffer_hash(buf_offset);

	TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
		if (buf->buf_offset == buf_offset)
			break;
	}
	if (buf == NULL) {
		buf = malloc(sizeof(*buf));
		bzero(buf, sizeof(*buf));
		if (DebugOpt) {
			fprintf(stderr, "get_buffer %016llx %016llx\n",
				orig_offset, buf_offset);
		}
		buf->buf_offset = buf_offset;
		buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
					(buf_offset & HAMMER_OFF_SHORT_MASK);
		buf->volume = volume;
		TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
		++volume->cache.refs;
		buf->cache.u.buffer = buf;
		hammer_cache_add(&buf->cache, ISBUFFER);
	}
	++buf->cache.refs;
	hammer_cache_flush();
	if ((ondisk = buf->ondisk) == NULL) {
		buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
		if (isnew == 0) {
			n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
				  buf->buf_disk_offset);
			if (n != HAMMER_BUFSIZE) {
				err(1, "get_buffer: %s:%016llx Read failed at "
				       "offset %lld",
				    volume->name, buf->buf_offset,
				    buf->buf_disk_offset);
			}
		}
	}
	if (isnew) {
		bzero(ondisk, HAMMER_BUFSIZE);
		buf->cache.modified = 1;
	}
	return(buf);
}

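/*
 * Release a buffer reference.  Once the last reference is gone, and the
 * hammer cache has marked the buffer for deletion, flush the buffer if
 * modified and tear its tracking structure down.
 */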
void
rel_buffer(struct buffer_info *buffer)
{
	struct volume_info *volume;
	int hi;

	assert(buffer->cache.refs > 0);
	if (--buffer->cache.refs == 0) {
		if (buffer->cache.delete) {
			hi = buffer_hash(buffer->buf_offset);
			volume = buffer->volume;
			if (buffer->cache.modified)
				flush_buffer(buffer);
			TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
			hammer_cache_del(&buffer->cache);
			free(buffer->ondisk);
			free(buffer);
			rel_volume(volume);
		}
	}
}

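/*
 * Return a pointer to the data at buf_offset within a referenced buffer.
 * If *bufferp already references a different buffer (or isnew forces a
 * fresh one), the old buffer is released and replaced.
 */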
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
		int isnew)
{
	struct buffer_info *buffer;

	if ((buffer = *bufferp) != NULL) {
		if (isnew ||
		    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
			rel_buffer(buffer);
			buffer = *bufferp = NULL;
		}
	}
	if (buffer == NULL)
		buffer = *bufferp = get_buffer(buf_offset, isnew);
	return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
	struct buffer_info *buf;

	if (*bufp)
		rel_buffer(*bufp);
	*bufp = buf = get_buffer(node_offset, 0);
	return((void *)((char *)buf->ondisk +
			(int32_t)(node_offset & HAMMER_BUFMASK)));
}

/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: hammer_alloc_fifo() initializes the fifo header for the returned
 * item and zeroes out the remainder, so don't bzero() it.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
	struct buffer_info *buffer = NULL;
	hammer_node_ondisk_t node;

	node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
			      offp, &buffer);
	bzero(node, sizeof(*node));
	/* XXX buffer not released, pointer remains valid */
	return(node);
}

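/*
 * Allocate on-disk space for record data.  Data that fills an entire
 * buffer comes out of the large-data zone, smaller allocations come out
 * of the small-data zone, and a zero-length request returns NULL.
 */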
void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
		   struct buffer_info **data_bufferp)
{
	void *data;

	if (data_len >= HAMMER_BUFSIZE) {
		assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
		data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else if (data_len) {
		data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
				      offp, data_bufferp);
		bzero(data, data_len);
	} else {
		data = NULL;
	}
	return (data);
}

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialization code will then load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
	struct buffer_info *buffer = NULL;
	hammer_off_t layer1_offset;
	struct hammer_blockmap_layer1 *layer1;
	int i, isnew;

	layer1_offset = alloc_bigblock(root_vol, 0);
	for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
		isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
		layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
					 &buffer, isnew);
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	}
	rel_buffer(buffer);

	blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	blockmap->phys_offset = layer1_offset;
	blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
	blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
	blockmap->reserved01 = 0;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.  If this is
 * the root volume, initialize the freemap owner for the layer1 bigblock.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
	struct volume_info *root_vol;
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_base;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t phys_offset;
	hammer_off_t aligned_vol_free_end;
	int64_t count = 0;
	int modified1 = 0;

	root_vol = get_volume(RootVolNo);
	aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
				& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	printf("initialize freemap volume %d\n", vol->vol_no);

	/*
	 * Initialize the freemap.  First preallocate the bigblocks required
	 * to implement layer2.   This preallocation is a bootstrap allocation
	 * using blocks from the target volume.
	 */
	layer1_base = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			layer1->phys_offset = alloc_bigblock(vol, 0);
			layer1->blocks_free = 0;
			buffer1->cache.modified = 1;
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
		}
	}

	/*
	 * Now fill everything in.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_LARGEBLOCK_SIZE) {
		modified1 = 0;
		layer1_offset = layer1_base +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		if (phys_offset < vol->vol_free_off) {
			/*
			 * Fixups XXX - bigblocks already allocated as part
			 * of the freemap bootstrap.
			 */
			layer2->u.owner = HAMMER_ENCODE_FREEMAP(0, 0); /* XXX */
		} else if (phys_offset < vol->vol_free_end) {
			++layer1->blocks_free;
			buffer1->cache.modified = 1;
			layer2->u.owner = HAMMER_BLOCKMAP_FREE;
			++count;
			modified1 = 1;
		} else {
			layer2->u.owner = HAMMER_BLOCKMAP_UNAVAIL;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer2->cache.modified = 1;

		/*
		 * Finish-up layer 1
		 */
		if (modified1) {
			layer1->layer1_crc = crc32(layer1,
						   HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
	}
	rel_buffer(buffer1);
	rel_buffer(buffer2);
	rel_volume(root_vol);
	return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off and
 * update the freemap if owner != 0.
 *
 * An owner of 0 is used for bootstrapping the freemap.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, hammer_off_t owner)
{
	struct buffer_info *buffer = NULL;
	struct volume_info *root_vol;
	hammer_off_t result_offset;
	hammer_off_t layer_offset;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	int didget;

	if (volume == NULL) {
		volume = get_volume(RootVolNo);
		didget = 1;
	} else {
		didget = 0;
	}
	result_offset = volume->vol_free_off;
	if (result_offset >= volume->vol_free_end)
		panic("alloc_bigblock: Ran out of room, filesystem too small");
	volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

	/*
	 * Update the freemap
	 */
	if (owner) {
		root_vol = get_volume(RootVolNo);
		layer_offset = root_vol->ondisk->vol0_blockmap[
					HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
		layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
		layer1 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
		--layer1->blocks_free;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		buffer->cache.modified = 1;
		layer_offset = layer1->phys_offset +
			       HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
		layer2 = get_buffer_data(layer_offset, &buffer, 0);
		assert(layer2->u.owner == HAMMER_BLOCKMAP_FREE);
		layer2->u.owner = owner;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		buffer->cache.modified = 1;

		--root_vol->ondisk->vol0_stat_freebigblocks;
		root_vol->cache.modified = 1;

		rel_buffer(buffer);
		rel_volume(root_vol);
	}

	if (didget)
		rel_volume(volume);
	return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
	const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
	hammer_off_t undo_limit;
	hammer_blockmap_t blockmap;
	hammer_off_t scan;
	struct hammer_blockmap_layer2 *layer2;
	int n;
	int limit_index;

	/*
	 * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
	 * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
	 * 0.1% of the disk.
	 */
	undo_limit = UndoBufferSize;
	if (undo_limit == 0)
		undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
	undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
		     ~HAMMER_LARGEBLOCK_MASK64;
	if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
		undo_limit = HAMMER_LARGEBLOCK_SIZE;
	if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
		undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
	UndoBufferSize = undo_limit;

	blockmap = &ondisk->vol0_blockmap[undo_zone];
	bzero(blockmap, sizeof(*blockmap));
	blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
	blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
	blockmap->next_offset = blockmap->first_offset;
	blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

	layer2 = &ondisk->vol0_undo_array[0];
	n = 0;
	scan = blockmap->next_offset;
	limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

	assert(limit_index <= HAMMER_UNDO_LAYER2);

	for (n = 0; n < limit_index; ++n) {
		layer2->u.phys_offset = alloc_bigblock(NULL, scan);
		layer2->bytes_free = -1;	/* not used */
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

		scan += HAMMER_LARGEBLOCK_SIZE;
		++layer2;
	}
	while (n < HAMMER_UNDO_LAYER2) {
		layer2->u.phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer2->bytes_free = -1;
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		++layer2;
		++n;
	}
}

/*
 * Format a new blockmap.  Set the owner to the base of the blockmap
 * (meaning either the blockmap layer1 bigblock, layer2 bigblock, or
 * target bigblock).
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
	blockmap->phys_offset = alloc_bigblock(NULL, zone_base);
	blockmap->alloc_offset = zone_base;
	blockmap->first_offset = zone_base;
	blockmap->next_offset = zone_base;
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
	       struct buffer_info **bufferp)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct volume_info *volume;
	hammer_blockmap_t rootmap;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t bigblock_offset;
	void *ptr;

	volume = get_volume(RootVolNo);

	rootmap = &volume->ondisk->vol0_blockmap[zone];

	/*
	 * Alignment and buffer-boundary issues.  If the allocation would
	 * cross a buffer boundary we have to skip to the next buffer.
	 */
	bytes = (bytes + 7) & ~7;
	if ((rootmap->next_offset ^ (rootmap->next_offset + bytes - 1)) &
	    ~HAMMER_BUFMASK64) {
		volume->cache.modified = 1;
		rootmap->next_offset = (rootmap->next_offset + bytes) &
				       ~HAMMER_BUFMASK64;
	}

	/*
	 * Dive layer 1
	 */
	layer1_offset = rootmap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->next_offset);

	layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
	if ((rootmap->next_offset >= rootmap->alloc_offset &&
	    (rootmap->next_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) ||
	    layer1->phys_offset == HAMMER_BLOCKMAP_FREE
	) {
		assert(rootmap->next_offset >= rootmap->alloc_offset);
		buffer1->cache.modified = 1;
		bzero(layer1, sizeof(*layer1));
		layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
		layer1->phys_offset = alloc_bigblock(NULL,
						     rootmap->next_offset);
	}

	/*
	 * Dive layer 2
	 */
	layer2_offset = layer1->phys_offset +
			HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->next_offset);

	layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

	if ((rootmap->next_offset & HAMMER_LARGEBLOCK_MASK64) == 0 &&
	    (rootmap->next_offset >= rootmap->alloc_offset ||
	     layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE)
	) {
		assert(rootmap->next_offset >= rootmap->alloc_offset);
		buffer2->cache.modified = 1;
		bzero(layer2, sizeof(*layer2));
		layer2->u.phys_offset = alloc_bigblock(NULL,
						       rootmap->next_offset);
		layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
		--layer1->blocks_free;
	}

	buffer1->cache.modified = 1;
	buffer2->cache.modified = 1;
	volume->cache.modified = 1;
	layer2->bytes_free -= bytes;
	*result_offp = rootmap->next_offset;
	rootmap->next_offset += bytes;
	rootmap->alloc_offset = rootmap->next_offset;

	bigblock_offset = layer2->u.phys_offset +
			  (*result_offp & HAMMER_LARGEBLOCK_MASK);

	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
	layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

	ptr = get_buffer_data(bigblock_offset, bufferp, 0);
	(*bufferp)->cache.modified = 1;

	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);

	rel_volume(volume);
	return(ptr);
}

/*
 * Presize a blockmap.  Allocate all layer2 bigblocks required to map the
 * blockmap through the specified zone limit.
 *
 * Note: This code is typically called later, after some data may have
 *       already been allocated, but can be called or re-called at any time.
 *
 * Note: vol0_zone_limit is not zone-encoded.
 */
void
presize_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base,
		 hammer_off_t vol0_zone_limit)
{
	struct buffer_info *buffer1 = NULL;
	struct buffer_info *buffer2 = NULL;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_off_t zone_limit;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;

	zone_limit = zone_base + vol0_zone_limit;

	while (zone_base < zone_limit) {
		layer1_offset = blockmap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(zone_base);
		layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

		if ((zone_base >= blockmap->alloc_offset &&
		    (zone_base & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) ||
		     layer1->phys_offset == HAMMER_BLOCKMAP_FREE
		) {
			bzero(layer1, sizeof(*layer1));
			layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
			layer1->phys_offset = alloc_bigblock(NULL, zone_base);
			layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
			buffer1->cache.modified = 1;
		}
		layer2_offset = layer1->phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(zone_base);
		layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
		if (zone_base >= blockmap->alloc_offset ||
		    layer2->u.phys_offset == HAMMER_BLOCKMAP_FREE) {
			bzero(layer2, sizeof(*layer2));
			layer2->u.phys_offset = HAMMER_BLOCKMAP_FREE;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			layer2->entry_crc = crc32(layer2,
						  HAMMER_LAYER2_CRCSIZE);
			buffer2->cache.modified = 1;
		}
		zone_base += HAMMER_LARGEBLOCK_SIZE64;
	}
	if (blockmap->alloc_offset < zone_limit)
		blockmap->alloc_offset = zone_limit;

	if (buffer1)
		rel_buffer(buffer1);
	if (buffer2)
		rel_buffer(buffer2);
}

#if 0
/*
 * Reserve space from the FIFO.  Make sure that the allocation does not
 * cross a record boundary.
 *
 * Zero out base_bytes and initialize the fifo head and tail.  The
 * data area is not zeroed.
 */
static
hammer_off_t
hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
		  struct buffer_info **bufp, u_int16_t hdr_type)
{
	struct buffer_info *buf;
	struct volume_info *volume;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t off;
	int32_t aligned_bytes;

	aligned_bytes = (base_bytes + ext_bytes + HAMMER_TAIL_ONDISK_SIZE +
			 HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK;

	volume = get_volume(RootVolNo);
	off = volume->ondisk->vol0_fifo_end;

	/*
	 * For now don't deal with transitions across buffer boundaries,
	 * only newfs_hammer uses this function.
	 */
	assert((off & ~HAMMER_BUFMASK64) ==
		((off + aligned_bytes) & ~HAMMER_BUFMASK));

	*bufp = buf = get_buffer(off, 0);

	buf->cache.modified = 1;
	volume->cache.modified = 1;

	head = (void *)((char *)buf->ondisk + ((int32_t)off & HAMMER_BUFMASK));
	bzero(head, base_bytes);

	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = aligned_bytes;
	head->hdr_seq = volume->ondisk->vol0_next_seq++;

	tail = (void*)((char *)head + aligned_bytes - HAMMER_TAIL_ONDISK_SIZE);
	tail->tail_signature = HAMMER_TAIL_SIGNATURE;
	tail->tail_type = hdr_type;
	tail->tail_size = aligned_bytes;

	volume->ondisk->vol0_fifo_end += aligned_bytes;
	volume->cache.modified = 1;

	rel_volume(volume);

	return(off);
}

#endif

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
	struct volume_info *vol;

	TAILQ_FOREACH(vol, &VolList, entry)
		flush_volume(vol);
}

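/*
 * Write back every cached buffer belonging to the volume, then rewrite
 * the volume header itself.
 */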
void
flush_volume(struct volume_info *volume)
{
	struct buffer_info *buffer;
	int i;

	for (i = 0; i < HAMMER_BUFLISTS; ++i) {
		TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
			flush_buffer(buffer);
	}
	writehammerbuf(volume, volume->ondisk, 0);
	volume->cache.modified = 0;
}

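/*
 * Write a single cached buffer back to its volume and clear its
 * modified flag.
 */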
void
flush_buffer(struct buffer_info *buffer)
{
	writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
	buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
	head->hdr_signature = HAMMER_HEAD_SIGNATURE;
	head->hdr_type = hdr_type;
	head->hdr_size = 0;
	head->hdr_crc = 0;
	head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
	ssize_t n;

	n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

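/*
 * Write one HAMMER_BUFSIZE block to the volume at the given byte offset,
 * exiting on a short or failed write.
 */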
static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
	ssize_t n;

	n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
	if (n != HAMMER_BUFSIZE)
		err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

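/*
 * Print a formatted fatal error message to stderr and exit.
 */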
void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(1);
}