sbin/hammer: Merge check_volume()
dragonfly.git: sbin/hammer/ondisk.c
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/stat.h>
#include <sys/diskslice.h>
#include <sys/diskmbr.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>

#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
			struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
			struct buffer_info **bufferp, int isnew);
static int readhammerbuf(struct volume_info *vol, void *data, int64_t offset);
static void writehammerbuf(struct volume_info *vol, const void *data,
			int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
int     UseReadBehind = -4;
int     UseReadAhead = 4;
int     AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

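/*
 * Hash a buffer offset into one of the volume's HAMMER_BUFLISTS buffer
 * lists.  E.g. with the 16384 byte HAMMER_BUFSIZE, offsets 0 and 16384
 * hash to buckets 0 and 1, and the index wraps around once the buffer
 * index exceeds HAMMER_BUFLISTMASK.
 */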
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
        int hi;

        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}

static struct buffer_info *
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
{
        int hi;
        struct buffer_info *buf;

        hi = buffer_hash(buf_offset);
        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
                if (buf->buf_offset == buf_offset)
                        return(buf);
        return(NULL);
}

/*
 * Allocate a volume structure, open the underlying file or device, and
 * read or initialize the volume header.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int i, n;
        struct stat st1, st2;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);
        vol->name = strdup(filename);
        vol->fd = open(vol->name, oflags);
        if (vol->fd < 0) {
                err(1, "setup_volume: %s: Open failed", vol->name);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = readhammerbuf(vol, ondisk, 0);
                if (n == -1) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            vol->name);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew > 0) {
                vol->cache.modified = 1;
        }

        if (fstat(vol->fd, &st1) != 0) {
                errx(1, "setup_volume: %s: Failed to stat", vol->name);
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume: %s: Duplicate volume number %d "
                                "against %s", vol->name, vol_no, scan->name);
                }
                if (fstat(scan->fd, &st2) != 0) {
                        errx(1, "setup_volume: %s: Failed to stat %s",
                                vol->name, scan->name);
                }
                if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
                        errx(1, "setup_volume: %s: Specified more than once",
                                vol->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}

/*
 * Check basic volume characteristics.
 */
void
check_volume(struct volume_info *vol)
{
        struct partinfo pinfo;
        struct stat st;

        /*
         * Get basic information about the volume
         */
        if (ioctl(vol->fd, DIOCGPART, &pinfo) < 0) {
                /*
                 * Allow the formatting of regular files as HAMMER volumes
                 */
                if (fstat(vol->fd, &st) < 0)
                        err(1, "Unable to stat %s", vol->name);
                vol->size = st.st_size;
                vol->type = "REGFILE";
        } else {
                /*
                 * When formatting a block device as a HAMMER volume the
                 * sector size must be compatible.  HAMMER uses 16384 byte
                 * filesystem buffers.
                 */
                if (pinfo.reserved_blocks) {
                        errx(1, "HAMMER cannot be placed in a partition "
                                "which overlaps the disklabel or MBR");
                }
                if (pinfo.media_blksize > HAMMER_BUFSIZE ||
                    HAMMER_BUFSIZE % pinfo.media_blksize) {
                        errx(1, "A media sector size of %d is not supported",
                             pinfo.media_blksize);
                }

                vol->size = pinfo.media_size;
                vol->device_offset = pinfo.media_offset;
                vol->type = "DEVICE";
        }

        /*
         * Reserve space for (future) header junk, setup our poor-man's
         * big-block allocator.
         */
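        /* 16 buffers x 16KB HAMMER_BUFSIZE = 256KB reserved at the front */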
        vol->vol_alloc = HAMMER_BUFSIZE * 16;
}

struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL) {
                if (AssertOnFailure)
                        errx(1, "get_volume: Volume %d does not exist!",
                                vol_no);
                return(NULL);
        }
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

void
rel_volume(struct volume_info *volume)
{
        if (volume == NULL)
                return;
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}

/*
 * Acquire the specified buffer.  isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;
        int vol_no;
        int zone;
        int hi, n;
        int dora = 0;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
        }
        if (buf_offset == HAMMER_OFF_BAD)
                return(NULL);

        if (AssertOnFailure) {
                assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                       HAMMER_ZONE_RAW_BUFFER);
        }
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);
        if (volume == NULL)
                return(NULL);

        buf_offset &= ~HAMMER_BUFMASK64;
        buf = find_buffer(volume, buf_offset);

        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                if (DebugOpt > 1) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                buf->buf_offset = buf_offset;
                buf->raw_offset = volume->ondisk->vol_buf_beg +
                                  (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                hi = buffer_hash(buf_offset);
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
                dora = (isnew == 0);
        } else {
                if (DebugOpt > 1) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                hammer_cache_used(&buf->cache);
                ++buf->use_count;
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew <= 0) {
                        n = readhammerbuf(volume, ondisk, buf->raw_offset);
                        if (n == -1) {
                                if (AssertOnFailure)
                                        err(1, "get_buffer: %s:%016llx "
                                            "Read failed at offset %016llx",
                                            volume->name,
                                            (long long)buf->buf_offset,
                                            (long long)buf->raw_offset);
                                bzero(ondisk, HAMMER_BUFSIZE);
                        }
                }
        }
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        if (dora)
                get_buffer_readahead(buf);
        return(buf);
}

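/*
 * Instantiate buffers surrounding the base buffer, from UseReadBehind
 * buffers behind it (a negative count) up to UseReadAhead buffers ahead.
 * Offsets outside the volume's buffer area, the base buffer itself
 * (ri == 0), and buffers already instantiated are skipped.
 */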
static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        vol = base->volume;

        while (ri < re) {
                if (raw_offset >= vol->ondisk->vol_buf_end)
                        break;
                if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
                        raw_offset - vol->ondisk->vol_buf_beg);
                buf = find_buffer(vol, buf_offset);
                if (buf == NULL) {
                        buf = get_buffer(buf_offset, -1);
                        rel_buffer(buf);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}

void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        if (buffer == NULL)
                return;
        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

/*
 * Retrieve a pointer to buffer data given a buffer offset.  The underlying
 * bufferp is freed if isnew or the offset is out of range of the cached data.
 * If bufferp is freed a referenced buffer is loaded into it.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        if (*bufferp != NULL) {
                if (isnew > 0 ||
                    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(*bufferp);
                        *bufferp = NULL;
                }
        }
        return(get_ondisk(buf_offset, bufferp, isnew));
}

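/*
 * Typical iteration pattern over on-disk structures, as used by
 * initialize_freemap() and others below (a sketch, not a complete
 * function).  Reusing the same buffer_info pointer means the buffer is
 * only re-acquired when the offset crosses a buffer boundary, and one
 * rel_buffer() call drops the last reference:
 *
 *	struct buffer_info *buffer = NULL;
 *	struct hammer_blockmap_layer1 *layer1;
 *
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	...
 *	rel_buffer(buffer);
 */
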
/*
 * Retrieve a pointer to a B-Tree node given a node offset.  The underlying
 * bufferp is freed if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
{
        if (*bufferp != NULL) {
                rel_buffer(*bufferp);
                *bufferp = NULL;
        }
        return(get_ondisk(node_offset, bufferp, 0));
}

/*
 * Return a pointer to buffer data given a buffer offset.
 * If *bufferp is NULL acquire the buffer otherwise use that buffer.
 */
static __inline
void *
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp, int isnew)
{
        struct buffer_info *buffer;

        buffer = *bufferp;
        if (buffer == NULL) {
                buffer = *bufferp = get_buffer(buf_offset, isnew);
                if (buffer == NULL)
                        return(NULL);
        }

        return((char *)buffer->ondisk +
                ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Allocate HAMMER elements - btree nodes, meta data, data storage
 */
void *
alloc_btree_element(hammer_off_t *offp, struct buffer_info **data_bufferp)
{
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, data_bufferp);
        bzero(node, sizeof(*node));
        return (node);
}

void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
                              offp, data_bufferp);
        bzero(data, data_len);
        return (data);
}

/*
 * The only data_len supported by HAMMER userspace for the large data zone
 * (zone 10) is HAMMER_BUFSIZE, which is 16KB.  >16KB data does not fit
 * in a buffer allocated by get_buffer().  Also, alloc_blockmap() does
 * not consider >16KB buffer sizes.
 */
void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len == HAMMER_BUFSIZE); /* just one buffer */
                data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else if (data_len) {
                data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else {
                data = NULL;
        }
        return (data);
}
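
/*
 * Zone selection above, e.g.: a small record body of 100 bytes comes
 * from the small data zone, while a full 16KB buffer-sized block comes
 * from the large data zone (zone 10); a data_len of 0 allocates nothing.
 */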
/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.
 * initialize_freemap() will then load each volume's free space into it.
 */
void
format_freemap(struct volume_info *root_vol)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_blockmap_t blockmap;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == RootVolNo);

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = layer1_offset;
        blockmap->first_offset = 0;
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t aligned_vol_free_end;
        hammer_blockmap_t freemap;
        int64_t count = 0;
        int64_t layer1_count = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the big-blocks required
         * to implement layer2.  This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        layer1_base = freemap->phys_offset;

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_count = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        layer2_offset = layer1->phys_offset +
                                        HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                        bzero(layer2, sizeof(*layer2));

                        if (phys_offset + block_offset < vol->vol_free_off) {
                                /*
                                 * Fixups XXX - big-blocks already allocated as part
                                 * of the freemap bootstrap.
                                 */
                                if (layer2->zone == 0) {
                                        layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                        layer2->bytes_free = 0;
                                }
                        } else if (phys_offset + block_offset < vol->vol_free_end) {
                                layer2->zone = 0;
                                layer2->append_off = 0;
                                layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                                ++count;
                                ++layer1_count;
                        } else {
                                layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                        buffer2->cache.modified = 1;
                }

                layer1->blocks_free += layer1_count;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer1->cache.modified = 1;
        }

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}

/*
 * Returns the number of big-blocks available for filesystem data and undos
 * without formatting.
 */
int64_t
count_freemap(struct volume_info *vol)
{
        hammer_off_t phys_offset;
        hammer_off_t vol_free_off;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;

        vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        if (vol->vol_no == RootVolNo)
                vol_free_off += HAMMER_BIGBLOCK_SIZE;

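        /*
         * Account for the big-blocks initialize_freemap() consumes for
         * its bootstrap: one per HAMMER_BLOCKMAP_LAYER2 range for the
         * layer2 pages (the root volume also reserved one big-block
         * above for the layer1 page).
         */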
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                vol_free_off += HAMMER_BIGBLOCK_SIZE;
        }

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                if (phys_offset < vol_free_off) {
                        ;
                } else if (phys_offset < vol->vol_free_end) {
                        ++count;
                }
        }

        return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;

        if (volume == NULL)
                volume = get_volume(RootVolNo);

        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                errx(1, "alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        /*
         * Update the freemap.
         */
        if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
                root_vol = get_volume(RootVolNo);
                freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
                layer_offset = freemap->phys_offset +
                               HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer1->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer2, 0);
                assert(layer2->zone == 0);
                layer2->zone = zone;
                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                layer2->bytes_free = 0;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                --root_vol->ondisk->vol0_stat_freebigblocks;
                root_vol->cache.modified = 1;

                rel_buffer(buffer1);
                rel_buffer(buffer2);
                rel_volume(root_vol);
        }

        rel_volume(volume);
        return(result_offset);
}

/*
 * Format the undomap for the root volume.
 */
void
format_undomap(struct volume_info *root_vol)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        struct hammer_volume_ondisk *ondisk;
        struct buffer_info *buffer = NULL;
        hammer_off_t scan;
        int n;
        int limit_index;
        u_int32_t seqno;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == RootVolNo);
        ondisk = root_vol->ondisk;

        /*
         * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 big-blocks.  Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 500MB, or approximately 1% of
         * the recommended 50G disk.
         *
         * Changing this minimum is rather dangerous as complex filesystem
         * operations can cause the UNDO FIFO to fill up otherwise.
         */
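        /*
         * E.g. on the recommended 50G disk, 0.1% is only ~50MB, so the
         * 500MB floor applies; volumes larger than roughly 500GB get
         * ~0.1% of their size, subject to the big-block rounding and
         * the HAMMER_UNDO_LAYER2 cap below.
         */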
        undo_limit = UndoBufferSize;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 500*1024*1024)
                        undo_limit = 500*1024*1024;
        }
        undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
                     ~HAMMER_BIGBLOCK_MASK64;
        if (undo_limit < HAMMER_BIGBLOCK_SIZE)
                undo_limit = HAMMER_BIGBLOCK_SIZE;
        if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;
        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
                                                        HAMMER_ZONE_UNDO_INDEX);
        }
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n++] = HAMMER_BLOCKMAP_UNAVAIL;
        }

        /*
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
         */
        printf("initializing the undo map (%jd MB)\n",
                (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
                (1024 * 1024));

        scan = blockmap->first_offset;
        seqno = 0;

        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int isnew;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                bzero(head, bytes);
                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                                crc32(head + 1, bytes - sizeof(*head));

                scan += bytes;
        }
        rel_buffer(buffer);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, int zone, hammer_off_t offset)
{
        hammer_off_t zone_base = HAMMER_ZONE_ENCODE(zone, offset);

        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = 0;
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->alloc_offset = HAMMER_ENCODE(zone, 255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t chunk_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        assert(HAMMER_ZONE_DECODE(blockmap->next_offset) == zone);

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
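        /*
         * E.g. a 24 byte request is rounded up to 32 below, and a rounded
         * request that would straddle a 16KB buffer boundary advances
         * next_offset to the start of the next buffer first.
         */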
        bytes = (bytes + 15) & ~15;
        assert(bytes > 0 && bytes <= HAMMER_BUFSIZE);  /* not HAMMER_XBUFSIZE */
        assert(zone >= HAMMER_ZONE2_MAPPED_INDEX && zone < HAMMER_MAX_ZONES);

again:
        assert(blockmap->next_offset != HAMMER_ZONE_ENCODE(zone + 1, 0));

        if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                blockmap->next_offset = (blockmap->next_offset + bytes - 1) &
                                        ~HAMMER_BUFMASK64;
        }
        chunk_offset = blockmap->next_offset & HAMMER_BIGBLOCK_MASK;

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        assert(!(chunk_offset == 0 && layer1->blocks_free == 0));

        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new big-block assign ownership to our
         * zone.  If the big-block is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                layer2->zone = zone;
                --volume->ondisk->vol0_stat_freebigblocks;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                volume->cache.modified = 1;
                blockmap->next_offset = (blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
                                        ~HAMMER_BIGBLOCK_MASK64;
                goto again;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        assert(layer2->append_off == chunk_offset);
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset &
                              HAMMER_BIGBLOCK_MASK;

        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

        ptr = get_buffer_data(*result_offp, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(volume);
        return(ptr);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;
        int i;

        for (i = 0; i < HAMMER_BUFLISTS; ++i) {
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        }
        writehammerbuf(volume, volume->ondisk, 0);
        volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
        writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
        buffer->cache.modified = 0;
}

/*
 * Core I/O operations
 */
static int
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                return(-1);
        return(0);
}

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

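/*
 * Size the boot area.  If no size was specified start from the nominal
 * size and halve it until it fits within an equal share of the average
 * volume size, dropping to 0 if it falls below the minimum; explicitly
 * specified sizes are clamped up to the minimum.
 */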
int64_t
init_boot_area_size(int64_t value, off_t avg_vol_size)
{
        if (value == 0) {
                value = HAMMER_BOOT_NOMBYTES;
                while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
                        value >>= 1;
                if (value < HAMMER_BOOT_MINBYTES)
                        value = 0;
        } else if (value < HAMMER_BOOT_MINBYTES) {
                value = HAMMER_BOOT_MINBYTES;
        }

        return(value);
}

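/*
 * Size the memory log area using the same strategy as the boot area
 * above, but with the HAMMER_MEM_* nominal and minimum sizes.
 */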
int64_t
init_mem_area_size(int64_t value, off_t avg_vol_size)
{
        if (value == 0) {
                value = HAMMER_MEM_NOMBYTES;
                while (value > avg_vol_size / HAMMER_MAX_VOLUMES)
                        value >>= 1;
                if (value < HAMMER_MEM_MINBYTES)
                        value = 0;
        } else if (value < HAMMER_MEM_MINBYTES) {
                value = HAMMER_MEM_MINBYTES;
        }

        return(value);
}