sbin/newfs_hammer: Properly allocate root inode and pfs from meta zone
sbin/hammer/ondisk.c
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
                        struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
                        struct buffer_info **bufferp, int isnew);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
int     UseReadBehind = -4;
int     UseReadAhead = 4;
int     AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
        int hi;

        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}
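
/*
 * The hash simply scales buf_offset down to a buffer index (HAMMER
 * buffers are HAMMER_BUFSIZE bytes, 16KB) and masks it into the
 * per-volume list array; the zone and volume bits in the top of the
 * offset are discarded by HAMMER_BUFLISTMASK.
 */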

/*
 * Lookup the requested volume structure, creating it if it does not
 * already exist, and open the backing file.  For a new volume the
 * header is zeroed, otherwise it is read in and validated.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int i, n;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);
        vol->name = strdup(filename);
        vol->fd = open(filename, oflags);
        if (vol->fd < 0) {
                free(vol->name);
                free(vol);
                err(1, "setup_volume: %s: Open failed", filename);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
                if (n != HAMMER_BUFSIZE) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            filename);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype,
                         sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid,
                                sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew > 0) {
                /*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
                vol->cache.modified = 1;
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume %s: Duplicate volume number %d "
                                "against %s", filename, vol_no, scan->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}
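
/*
 * Illustrative sketch (not compiled in): a typical utility opens each
 * volume with setup_volume(), takes a reference on the root volume with
 * get_volume(), and flushes everything on the way out.  The function
 * name and arguments below are placeholders, not part of this file.
 */
#if 0
static void
example_open_volumes(const char **filenames, int nvols)
{
        struct volume_info *root_vol;
        int i;

        for (i = 0; i < nvols; ++i)
                setup_volume(-1, filenames[i], 0, O_RDWR);

        root_vol = get_volume(RootVolNo);
        /* ... inspect or modify root_vol->ondisk ... */
        root_vol->cache.modified = 1;
        rel_volume(root_vol);

        flush_all_volumes();
}
#endif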

struct volume_info *
test_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                return(NULL);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                errx(1, "get_volume: Volume %d does not exist!", vol_no);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

void
rel_volume(struct volume_info *volume)
{
        if (volume == NULL)
                return;
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;
        int vol_no;
        int zone;
        int hi, n;
        int dora = 0;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
        }
        if (buf_offset == HAMMER_OFF_BAD)
                return(NULL);

        if (AssertOnFailure) {
                assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                       HAMMER_ZONE_RAW_BUFFER);
        }
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = test_volume(vol_no);
        if (volume == NULL) {
                if (AssertOnFailure)
                        errx(1, "get_buffer: Volume %d not found!", vol_no);
                return(NULL);
        }

        buf_offset &= ~HAMMER_BUFMASK64;

        hi = buffer_hash(buf_offset);

        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry) {
                if (buf->buf_offset == buf_offset)
                        break;
        }
        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                buf->buf_offset = buf_offset;
                buf->raw_offset = volume->ondisk->vol_buf_beg +
                                  (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
                dora = (isnew == 0);
                if (isnew < 0)
                        buf->flags |= HAMMER_BUFINFO_READAHEAD;
        } else {
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                if (isnew >= 0) {
                        buf->flags &= ~HAMMER_BUFINFO_READAHEAD;
                        hammer_cache_used(&buf->cache);
                }
                ++buf->use_count;
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew <= 0) {
                        n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
                                  buf->raw_offset);
                        if (n != HAMMER_BUFSIZE) {
                                if (AssertOnFailure)
                                        err(1, "get_buffer: %s:%016llx "
                                            "Read failed at offset %016llx",
                                            volume->name,
                                            (long long)buf->buf_offset,
                                            (long long)buf->raw_offset);
                                bzero(ondisk, HAMMER_BUFSIZE);
                        }
                }
        }
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        if (dora)
                get_buffer_readahead(buf);
        return(buf);
}
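
/*
 * A note on get_buffer()'s isnew argument: isnew > 0 creates a zeroed
 * buffer and marks it modified, isnew == 0 reads the buffer from the
 * media and triggers read-ahead, and isnew < 0 is an internal
 * read-ahead request which must not recurse into further read-ahead.
 */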

static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;
        int hi;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        vol = base->volume;

        while (ri < re) {
                if (raw_offset >= vol->ondisk->vol_buf_end)
                        break;
                if (raw_offset < vol->ondisk->vol_buf_beg) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                buf_offset = HAMMER_VOL_ENCODE(vol->vol_no) |
                             HAMMER_ZONE_RAW_BUFFER |
                             (raw_offset - vol->ondisk->vol_buf_beg);
                hi = buffer_hash(buf_offset);
                TAILQ_FOREACH(buf, &vol->buffer_lists[hi], entry) {
                        if (buf->raw_offset == raw_offset)
                                break;
                }
                if (buf == NULL) {
                        buf = get_buffer(buf_offset, -1);
                        rel_buffer(buf);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}

void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        if (buffer == NULL)
                return;
        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

/*
 * Retrieve a pointer to buffer data given a buffer offset.  The buffer
 * in *bufferp is released if isnew is set or the offset is out of range
 * of the cached data; if it is released, a new referenced buffer is
 * loaded in its place.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        if (*bufferp != NULL) {
                if (isnew > 0 ||
                    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(*bufferp);
                        *bufferp = NULL;
                }
        }
        return(get_ondisk(buf_offset, bufferp, isnew));
}
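
/*
 * The XOR test above is a cheap same-buffer check: two offsets refer to
 * the same underlying HAMMER_BUFSIZE buffer iff they agree in every bit
 * outside HAMMER_BUFMASK64.  If they differ the cached buffer cannot
 * satisfy the request and is replaced.
 */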

/*
 * Retrieve a pointer to a B-Tree node given a cluster offset.  The underlying
 * bufferp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
{
        if (*bufferp != NULL) {
                rel_buffer(*bufferp);
                *bufferp = NULL;
        }
        return(get_ondisk(node_offset, bufferp, 0));
}

/*
 * Return a pointer to buffer data given a buffer offset.
 * If *bufferp is NULL acquire the buffer otherwise use that buffer.
 */
static __inline
void *
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp,
        int isnew)
{
        struct buffer_info *buffer;

        buffer = *bufferp;
        if (buffer == NULL) {
                buffer = *bufferp = get_buffer(buf_offset, isnew);
                if (buffer == NULL)
                        return(NULL);
        }

        return((char *)buffer->ondisk +
                ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Allocate HAMMER elements - btree nodes, meta data, data storage
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
        struct buffer_info *buffer = NULL;
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, &buffer);
        bzero(node, sizeof(*node));
        /* XXX buffer not released, pointer remains valid */
        return(node);
}

void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
                              offp, data_bufferp);
        bzero(data, data_len);
        return (data);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
                data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else if (data_len) {
                data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else {
                data = NULL;
        }
        return (data);
}
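
/*
 * Illustrative sketch (not compiled in): allocating a B-Tree node and a
 * small data record with the helpers above.  Note that
 * alloc_btree_element() deliberately does not release its buffer (see
 * the XXX above), while the data buffer reference is handed back to the
 * caller.  The function name and sizes below are placeholders.
 */
#if 0
static void
example_alloc(void)
{
        struct buffer_info *data_buffer = NULL;
        hammer_off_t node_off;
        hammer_off_t data_off;
        hammer_node_ondisk_t node;
        void *data;

        node = alloc_btree_element(&node_off);
        data = alloc_data_element(&data_off, 128, &data_buffer);
        /* ... fill in *node and the 128 data bytes ... */
        rel_buffer(data_buffer);
}
#endif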

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialize_freemap() code will then load each volume's free space
 * into the freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        blockmap->phys_offset = layer1_offset;
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->reserved01 = 0;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}
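
/*
 * Freemap geometry, for reference: layer1 is a single array formatted
 * above, and each of its entries points at a big-block of layer2
 * entries, one per HAMMER_BIGBLOCK_SIZE (8MB) big-block of media.  A
 * layer1 entry therefore governs HAMMER_BLOCKMAP_LAYER2 bytes of a
 * volume, which is why initialize_freemap() below bootstraps layer2 by
 * walking phys_offset in HAMMER_BLOCKMAP_LAYER2 steps.
 */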

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;
        int modified1 = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the bigblocks required
         * to implement layer2.   This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        layer1_base = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                modified1 = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

                layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                bzero(layer2, sizeof(*layer2));
                if (phys_offset < vol->vol_free_off) {
                        /*
                         * Fixups XXX - bigblocks already allocated as part
                         * of the freemap bootstrap.
                         */
                        if (layer2->zone == 0) {
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                } else if (phys_offset < vol->vol_free_end) {
                        ++layer1->blocks_free;
                        buffer1->cache.modified = 1;
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                        ++count;
                        modified1 = 1;
                } else {
                        layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                        layer2->bytes_free = 0;
                }
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                /*
                 * Finish-up layer 1
                 */
                if (modified1) {
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        buffer1->cache.modified = 1;
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;

        if (volume == NULL)
                volume = get_volume(RootVolNo);

        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                panic("alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        /*
         * Update the freemap.
         */
        if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
                root_vol = get_volume(RootVolNo);
                layer_offset = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
                layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer1->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer2, 0);
                assert(layer2->zone == 0);
                layer2->zone = zone;
                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                layer2->bytes_free = 0;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                --root_vol->ondisk->vol0_stat_freebigblocks;
                root_vol->cache.modified = 1;

                rel_buffer(buffer1);
                rel_buffer(buffer2);
                rel_volume(root_vol);
        }

        rel_volume(volume);
        return(result_offset);
}
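
/*
 * Note the bootstrap ordering: while the freemap itself is being
 * formatted (zone == HAMMER_ZONE_FREEMAP_INDEX) its layer1/layer2
 * entries cannot be updated yet, so only vol_free_off advances.
 * initialize_freemap() later recognizes those bootstrap blocks via its
 * phys_offset < vol->vol_free_off check and assigns them to the
 * freemap zone.
 */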

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        struct buffer_info *buffer = NULL;
        hammer_off_t scan;
        int n;
        int limit_index;
        u_int32_t seqno;

        /*
         * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 big blocks.  Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 500MB, or approximately 1% of
         * the recommended 50G disk.
         *
         * Changing this minimum is rather dangerous as complex filesystem
         * operations can cause the UNDO FIFO to fill up otherwise.
         */
        undo_limit = UndoBufferSize;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 500*1024*1024)
                        undo_limit = 500*1024*1024;
        }
        undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
                     ~HAMMER_BIGBLOCK_MASK64;
        if (undo_limit < HAMMER_BIGBLOCK_SIZE)
                undo_limit = HAMMER_BIGBLOCK_SIZE;
        if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;
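
        /*
         * Worked example of the sizing above: a 1TB data area yields
         * 1TB / 1000 ~= 1GB of UNDO, while a 100GB volume computes only
         * ~100MB and is bumped to the 500MB floor.  Either way the
         * result is rounded to a multiple of the 8MB big-block size and
         * clamped to HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2.
         */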

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        n = 0;
        scan = blockmap->next_offset;
        limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;

        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
                                                        HAMMER_ZONE_UNDO_INDEX);
                scan += HAMMER_BIGBLOCK_SIZE;
        }
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
                ++n;
        }

        /*
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
         */
        printf("initializing the undo map (%jd MB)\n",
                (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
                (1024 * 1024));

        scan = blockmap->first_offset;
        seqno = 0;

        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int isnew;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                bzero(head, bytes);
                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                                crc32(head + 1, bytes - sizeof(*head));

                scan += bytes;
        }
        rel_buffer(buffer);
}
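
/*
 * For reference, each HAMMER_UNDO_ALIGN-byte record written above is
 * laid out as:
 *
 *      +--------------------+ ...pad... +--------------------+
 *      | hammer_fifo_head   |           | hammer_fifo_tail   |
 *      | DUMMY, seq, crc    |           | DUMMY, size        |
 *      +--------------------+ ...pad... +--------------------+
 *
 * with the head crc covering the header through HAMMER_FIFO_HEAD_CRCOFF
 * plus the record body, so a version 4+ kernel scanning the FIFO finds
 * only valid dummy records on a freshly formatted volume.
 */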

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
        blockmap->phys_offset = 0;
        blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
                                 HAMMER_SHORT_OFF_ENCODE(-1);
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t zone2_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
        bytes = (bytes + 15) & ~15;
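
        /*
         * Example: a 100 byte request becomes (100 + 15) & ~15 = 112
         * bytes, so every allocation stays 16-byte aligned within its
         * big-block.
         */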

again:
        if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                blockmap->next_offset = (blockmap->next_offset + bytes) &
                                        ~HAMMER_BUFMASK64;
        }

        /*
         * Dive layer 1.  For now we can't allocate data outside of volume 0.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * Dive layer 2
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new bigblock assign ownership to our
         * zone.  If the bigblock is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                layer2->zone = zone;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset = (blockmap->next_offset +
                                         HAMMER_BIGBLOCK_SIZE) &
                                        ~HAMMER_BIGBLOCK_MASK64;
                goto again;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        assert(layer2->append_off ==
               (blockmap->next_offset & HAMMER_BIGBLOCK_MASK));
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset &
                              HAMMER_BIGBLOCK_MASK;

        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

        zone2_offset = (*result_offp & ~HAMMER_OFF_ZONE_MASK) |
                        HAMMER_ZONE_ENCODE(zone, 0);

        ptr = get_buffer_data(zone2_offset, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(volume);
        return(ptr);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;
        int i;

        for (i = 0; i < HAMMER_BUFLISTS; ++i) {
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        }
        writehammerbuf(volume, volume->ondisk, 0);
        volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
        writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
        buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = 0;
        head->hdr_crc = 0;
        head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
        va_list va;

        va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        va_end(va);
        fprintf(stderr, "\n");
        exit(1);
}