/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.25 2008/08/21 23:28:43 thomas Exp $
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
                        struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
                        struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
int     UseReadBehind = -4;
int     UseReadAhead = 4;
int     AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

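/*
 * Map a buffer offset to one of the HAMMER_BUFLISTS per-volume buffer
 * hash chains.
 */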
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
        int hi;

        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}

/*
 * Open the volume backing store, read (or, for a new volume, initialize)
 * the volume header, and link the volume structure into the global
 * volume list.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int i, n;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);
        vol->name = strdup(filename);
        vol->fd = open(filename, oflags);
        if (vol->fd < 0) {
                free(vol->name);
                free(vol);
                err(1, "setup_volume: %s: Open failed", filename);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
                if (n != HAMMER_BUFSIZE) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            filename);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew > 0) {
                /*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
                vol->cache.modified = 1;
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume: %s: Duplicate volume number %d "
                                "against %s", filename, vol_no, scan->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}

struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                errx(1, "get_volume: Volume %d does not exist!", vol_no);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

void
rel_volume(struct volume_info *volume)
{
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}

/*
 * Acquire the specified buffer.
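 *
 * isnew > 0  - the buffer is zero'd out and marked modified.
 * isnew == 0 - a normal read; a cache miss also triggers read-ahead.
 * isnew < 0  - a read issued on behalf of the read-ahead code itself.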
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;
        int vol_no;
        int zone;
        int hi, n;
        int dora = 0;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
        }
        if (buf_offset == HAMMER_OFF_BAD)
                return(NULL);

        if (AssertOnFailure) {
                assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                       HAMMER_ZONE_RAW_BUFFER);
        }
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);
        buf_offset &= ~HAMMER_BUFMASK64;

        hi = buffer_hash(buf_offset);

        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
                if (buf->buf_offset == buf_offset)
                        break;
        }
        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer %016llx %016llx\n",
                                (long long)orig_offset, (long long)buf_offset);
                }
                buf->buf_offset = buf_offset;
                buf->raw_offset = volume->ondisk->vol_buf_beg +
                                  (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
                dora = (isnew == 0);
                if (isnew < 0)
                        buf->flags |= HAMMER_BUFINFO_READAHEAD;
        } else {
                if (isnew >= 0) {
                        buf->flags &= ~HAMMER_BUFINFO_READAHEAD;
                        hammer_cache_used(&buf->cache);
                }
                ++buf->use_count;
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew <= 0) {
                        n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
                                  buf->raw_offset);
                        if (n != HAMMER_BUFSIZE) {
                                if (AssertOnFailure) {
                                        err(1, "get_buffer: %s:%016llx Read failed at "
                                               "offset %016llx",
                                            volume->name,
                                            (long long)buf->buf_offset,
                                            (long long)buf->raw_offset);
                                }
                                bzero(ondisk, HAMMER_BUFSIZE);
                        }
                }
        }
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        if (dora)
                get_buffer_readahead(buf);
        return(buf);
}

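/*
 * Pre-load the buffers around 'base', from UseReadBehind buffers before
 * it to UseReadAhead buffers after it, clipped to the volume's buffer
 * area.  Buffers already present in the cache are left alone.
 */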
static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;
        int hi;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        vol = base->volume;

        while (ri < re) {
                if (raw_offset >= vol->ondisk->vol_buf_end)
                        break;
                if (raw_offset < vol->ondisk->vol_buf_beg) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                buf_offset = HAMMER_VOL_ENCODE(vol->vol_no) |
                             HAMMER_ZONE_RAW_BUFFER |
                             (raw_offset - vol->ondisk->vol_buf_beg);
                hi = buffer_hash(buf_offset);
                TAILQ_FOREACH(buf, &vol->buffer_lists[hi], entry) {
                        if (buf->raw_offset == raw_offset)
                                break;
                }
                if (buf == NULL) {
                        buf = get_buffer(buf_offset, -1);
                        rel_buffer(buf);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}

void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

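/*
 * Return a pointer to the data at buf_offset, reusing *bufferp when it
 * already maps the enclosing buffer.  Otherwise the old buffer is
 * released and a referenced buffer for buf_offset is loaded in its place.
 */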
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        struct buffer_info *buffer;

        if ((buffer = *bufferp) != NULL) {
                if (isnew > 0 ||
                    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(buffer);
                        buffer = *bufferp = NULL;
                }
        }
        if (buffer == NULL)
                buffer = *bufferp = get_buffer(buf_offset, isnew);
        return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Retrieve a pointer to a B-Tree node given its zone offset.  The previous
 * contents of *bufp are released, if non-NULL, and a referenced buffer for
 * the node is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
        struct buffer_info *buf;

        if (*bufp)
                rel_buffer(*bufp);
        *bufp = buf = get_buffer(node_offset, 0);
        if (buf) {
                return((void *)((char *)buf->ondisk +
                                (int32_t)(node_offset & HAMMER_BUFMASK)));
        } else {
                return(NULL);
        }
}

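/*
 * Typical call pattern (illustrative only):
 *
 *      struct buffer_info *buffer = NULL;
 *      hammer_node_ondisk_t node;
 *
 *      node = get_node(node_offset, &buffer);
 *      if (node != NULL) {
 *              ... examine the node ...
 *              rel_buffer(buffer);
 *      }
 */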
/*
 * Allocate HAMMER elements - btree nodes, data storage, and record elements
 *
 * NOTE: Allocations are made from the blockmap; the historical
 * hammer_alloc_fifo() path is disabled, so the returned elements are
 * explicitly zero'd here.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
        struct buffer_info *buffer = NULL;
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, &buffer);
        bzero(node, sizeof(*node));
        /* XXX buffer not released, pointer remains valid */
        return(node);
}

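/*
 * Allocate a data element.  Allocations of a full buffer go to the
 * large-data zone, smaller ones to the small-data zone; data_len may
 * not exceed a single buffer (HAMMER_BUFSIZE).
 */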
void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
                data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else if (data_len) {
                data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else {
                data = NULL;
        }
        return (data);
}

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL;
 * initialize_freemap() will then load each volume's free space into it.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        rel_buffer(buffer);

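        /*
         * The freemap's blockmap structure lives in the root volume;
         * repoint the caller's blockmap argument at it before filling
         * it in.
         */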
        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        blockmap->phys_offset = layer1_offset;
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->reserved01 = 0;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;
        int modified1 = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the bigblocks required
         * to implement layer2.  This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        layer1_base = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
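        /*
         * Each layer2 entry ends up in one of three states: owned by the
         * freemap (the bootstrap bigblocks above), free (zone 0 with a
         * full bytes_free count), or unavailable (past the volume's end).
         */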
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_LARGEBLOCK_SIZE) {
                modified1 = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

                layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                bzero(layer2, sizeof(*layer2));
                if (phys_offset < vol->vol_free_off) {
                        /*
                         * Fixups XXX - bigblocks already allocated as part
                         * of the freemap bootstrap.
                         */
                        if (layer2->zone == 0) {
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                } else if (phys_offset < vol->vol_free_end) {
                        ++layer1->blocks_free;
                        buffer1->cache.modified = 1;
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                        ++count;
                        modified1 = 1;
                } else {
                        layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                        layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                        layer2->bytes_free = 0;
                }
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                /*
                 * Finish-up layer 1
                 */
                if (modified1) {
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        buffer1->cache.modified = 1;
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
        struct buffer_info *buffer = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        int didget;

        if (volume == NULL) {
                volume = get_volume(RootVolNo);
                didget = 1;
        } else {
                didget = 0;
        }
        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                panic("alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

        /*
         * Update the freemap.
         */
        if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
                root_vol = get_volume(RootVolNo);
                layer_offset = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
                layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer2->zone == 0);
                layer2->zone = zone;
                layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                layer2->bytes_free = 0;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer->cache.modified = 1;

                --root_vol->ondisk->vol0_stat_freebigblocks;
                root_vol->cache.modified = 1;

                rel_buffer(buffer);
                rel_volume(root_vol);
        }

        if (didget)
                rel_volume(volume);
        return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        struct buffer_info *buffer = NULL;
        hammer_off_t scan;
        int n;
        int limit_index;
        u_int32_t seqno;

        /*
         * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 100MB.
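         * (For example, a 500GB volume gets roughly 500MB of UNDO space.)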
         */
        undo_limit = UndoBufferSize;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 100*1024*1024)
                        undo_limit = 100*1024*1024;
        }
        undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
                     ~HAMMER_LARGEBLOCK_MASK64;
        if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
                undo_limit = HAMMER_LARGEBLOCK_SIZE;
        if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        n = 0;
        scan = blockmap->next_offset;
        limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
                                                        HAMMER_ZONE_UNDO_INDEX);
                scan += HAMMER_LARGEBLOCK_SIZE;
        }
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
                ++n;
        }

        /*
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
         */
        printf("initializing the undo map (%jd MB)\n",
                (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
                (1024 * 1024));

        scan = blockmap->first_offset;
        seqno = 0;

        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int isnew;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                bzero(head, bytes);
                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

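                /*
                 * The record CRC covers the fixed header up to the crc
                 * field, xor'd with the CRC of everything after the header.
                 */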
                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                                crc32(head + 1, bytes - sizeof(*head));

                scan += bytes;
        }
        if (buffer)
                rel_buffer(buffer);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
        blockmap->phys_offset = 0;
        blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
                                 HAMMER_SHORT_OFF_ENCODE(-1);
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t zone2_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
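         *
         * All allocations are padded to a multiple of 16 bytes.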
         */
        bytes = (bytes + 15) & ~15;

again:
        if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                blockmap->next_offset = (blockmap->next_offset + bytes) &
                                        ~HAMMER_BUFMASK64;
        }

        /*
         * Dive layer 1.  For now we can't allocate data outside of volume 0.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * Dive layer 2
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new bigblock assign ownership to our
         * zone.  If the bigblock is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                layer2->zone = zone;
                assert(layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset = (blockmap->next_offset + HAMMER_LARGEBLOCK_SIZE) &
                                        ~HAMMER_LARGEBLOCK_MASK64;
                goto again;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        assert(layer2->append_off ==
               (blockmap->next_offset & HAMMER_LARGEBLOCK_MASK));
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset &
                              HAMMER_LARGEBLOCK_MASK;

        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

        zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
                        HAMMER_ZONE_ENCODE(zone, 0);

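        /*
         * Hand the caller a pointer into the underlying buffer;
         * get_buffer() translates the zone-encoded offset through the
         * blockmap to locate the raw buffer.
         */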
        ptr = get_buffer_data(zone2_offset, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        if (buffer1)
                rel_buffer(buffer1);
        if (buffer2)
                rel_buffer(buffer2);

        rel_volume(volume);
        return(ptr);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;
        int i;

        for (i = 0; i < HAMMER_BUFLISTS; ++i) {
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        }
        writehammerbuf(volume, volume->ondisk, 0);
        volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
        writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
        buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = 0;
        head->hdr_crc = 0;
        head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
        va_list va;

        va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        va_end(va);
        fprintf(stderr, "\n");
        exit(1);
}