sbin/hammer: Fix comments
sbin/hammer/ondisk.c
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
                        struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
                        struct buffer_info **bufferp, int isnew);
static int readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
int     UseReadBehind = -4;
int     UseReadAhead = 4;
int     AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
        int hi;

        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}

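/*
 * Find a cached buffer_info for buf_offset on the volume's hash list,
 * or NULL if the buffer is not currently instantiated.
 */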
static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
{
        int hi;
        struct buffer_info *buf;

        hi = buffer_hash(buf_offset);
        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
                if (buf->buf_offset == buf_offset)
                        return(buf);
        return(NULL);
}

/*
 * Open a volume and read or initialize its header.  The volume is
 * cross-checked against the volumes already opened and linked into
 * VolList.
 */
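/*
 * Usage sketch (assumed, not part of this file): creation-time callers
 * pass isnew > 0 so the header starts zeroed and the volume is marked
 * modified, while callers inspecting an existing filesystem pass
 * isnew == 0 so the header is read and validated here.
 */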
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int i, n;
        struct stat st1, st2;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);
        vol->name = strdup(filename);
        vol->fd = open(vol->name, oflags);
        if (vol->fd < 0) {
                /* err() exits, so there is nothing to clean up here */
                err(1, "setup_volume: %s: Open failed", vol->name);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = readhammerbuf(vol, ondisk, 0);
                if (n == -1) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            vol->name);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew > 0) {
                vol->cache.modified = 1;
        }

        if (fstat(vol->fd, &st1) != 0) {
                err(1, "setup_volume: %s: Failed to stat", vol->name);
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume: %s: Duplicate volume number %d "
                                "against %s", vol->name, vol_no, scan->name);
                }
                if (fstat(scan->fd, &st2) != 0) {
                        err(1, "setup_volume: %s: Failed to stat %s",
                                vol->name, scan->name);
                }
                if ((st1.st_ino == st2.st_ino) && (st1.st_dev == st2.st_dev)) {
                        errx(1, "setup_volume: %s: Specified more than once",
                                vol->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}

struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL) {
                if (AssertOnFailure)
                        errx(1, "get_volume: Volume %d does not exist!",
                                vol_no);
                return(NULL);
        }
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

void
rel_volume(struct volume_info *volume)
{
        if (volume == NULL)
                return;
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}

/*
 * Acquire the specified buffer.  isnew is -1 only when called
 * via get_buffer_readahead() to prevent another readahead.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;
        int vol_no;
        int zone;
        int hi, n;
        int dora = 0;

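        /*
         * Offsets in zones above zone-2 (raw buffer) are translated
         * through the blockmap into a zone-2 raw buffer offset before
         * the physical lookup is done.
         */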
        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
        }
        if (buf_offset == HAMMER_OFF_BAD)
                return(NULL);

        if (AssertOnFailure) {
                assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                       HAMMER_ZONE_RAW_BUFFER);
        }
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);
        if (volume == NULL)
                return(NULL);

        buf_offset &= ~HAMMER_BUFMASK64;
        buf = find_buffer(volume, buf_offset);

        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                buf->buf_offset = buf_offset;
                buf->raw_offset = volume->ondisk->vol_buf_beg +
                                  (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                hi = buffer_hash(buf_offset);
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
                dora = (isnew == 0);
        } else {
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                hammer_cache_used(&buf->cache);
                ++buf->use_count;
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew <= 0) {
                        n = readhammerbuf(volume, ondisk, buf->raw_offset);
                        if (n == -1) {
                                if (AssertOnFailure)
                                        err(1, "get_buffer: %s:%016llx "
                                            "Read failed at offset %016llx",
                                            volume->name,
                                            (long long)buf->buf_offset,
                                            (long long)buf->raw_offset);
                                bzero(ondisk, HAMMER_BUFSIZE);
                        }
                }
        }
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        if (dora)
                get_buffer_readahead(buf);
        return(buf);
}

static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;

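        /*
         * Scan the window [UseReadBehind, UseReadAhead) of buffers around
         * the base buffer (by default 4 buffers of read-behind and 4 of
         * read-ahead) and instantiate any that are not already cached.
         * ri == 0 is the base buffer itself and is skipped.
         */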
        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        vol = base->volume;

        while (ri < re) {
                if (raw_offset >= vol->ondisk->vol_buf_end)
                        break;
                if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
                        raw_offset - vol->ondisk->vol_buf_beg);
                buf = find_buffer(vol, buf_offset);
                if (buf == NULL) {
                        buf = get_buffer(buf_offset, -1);
                        rel_buffer(buf);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}

void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        if (buffer == NULL)
                return;
        assert(buffer->cache.refs > 0);
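        /*
         * The buffer is torn down only when the last reference goes away
         * and the hammer cache has flagged it for deletion; a modified
         * buffer is flushed to disk first.
         */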
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

/*
 * Retrieve a pointer to buffer data given a buffer offset.  The underlying
 * *bufferp is released if isnew is set or if the offset falls outside the
 * cached buffer, in which case a referenced buffer is loaded into it.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        if (*bufferp != NULL) {
                if (isnew > 0 ||
                    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(*bufferp);
                        *bufferp = NULL;
                }
        }
        return(get_ondisk(buf_offset, bufferp, isnew));
}
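
/*
 * A usage sketch: callers keep the cached buffer across sequential
 * lookups and release it once when done, e.g.
 *
 *	struct buffer_info *buffer = NULL;
 *
 *	layer1 = get_buffer_data(layer1_offset, &buffer, 0);
 *	...more get_buffer_data() calls reusing &buffer...
 *	rel_buffer(buffer);
 *
 * as done by initialize_freemap() and alloc_blockmap() below.
 */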

/*
 * Retrieve a pointer to a B-Tree node given a node offset.  The underlying
 * *bufferp is released if non-NULL and a referenced buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
{
        if (*bufferp != NULL) {
                rel_buffer(*bufferp);
                *bufferp = NULL;
        }
        return(get_ondisk(node_offset, bufferp, 0));
}

/*
 * Return a pointer to buffer data given a buffer offset.
 * If *bufferp is NULL acquire the buffer, otherwise use that buffer.
 */
static __inline
void *
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp,
        int isnew)
{
        struct buffer_info *buffer;

        buffer = *bufferp;
        if (buffer == NULL) {
                buffer = *bufferp = get_buffer(buf_offset, isnew);
                if (buffer == NULL)
                        return(NULL);
        }

        return((char *)buffer->ondisk +
                ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Allocate HAMMER elements - btree nodes, meta data, data storage
 */
void *
alloc_btree_element(hammer_off_t *offp,
                    struct buffer_info **data_bufferp)
{
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, data_bufferp);
        bzero(node, sizeof(*node));
        return (node);
}

void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
                              offp, data_bufferp);
        bzero(data, data_len);
        return (data);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
                data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else if (data_len) {
                data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else {
                data = NULL;
        }
        return (data);
}

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.
 * initialize_freemap() will then load each volume's free space into it.
 */
void
format_freemap(struct volume_info *root_vol)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        hammer_blockmap_t blockmap;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == RootVolNo);

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = layer1_offset;
        blockmap->first_offset = 0;
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of big-blocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t aligned_vol_free_end;
        hammer_blockmap_t freemap;
        int64_t count = 0;
        int modified1 = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the big-blocks required
         * to implement layer2.   This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        layer1_base = freemap->phys_offset;

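        /*
         * One layer1 entry covers HAMMER_BLOCKMAP_LAYER2 bytes of volume
         * space, so this first pass visits each layer1 entry for the
         * volume exactly once.
         */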
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                modified1 = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

                layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                bzero(layer2, sizeof(*layer2));
                if (phys_offset < vol->vol_free_off) {
                        /*
                         * Fixups XXX - big-blocks already allocated as part
                         * of the freemap bootstrap.
                         */
                        if (layer2->zone == 0) {
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                } else if (phys_offset < vol->vol_free_end) {
                        ++layer1->blocks_free;
                        buffer1->cache.modified = 1;
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                        ++count;
                        modified1 = 1;
                } else {
                        layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                        layer2->bytes_free = 0;
                }
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                /*
                 * Finish-up layer 1
                 */
                if (modified1) {
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        buffer1->cache.modified = 1;
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}

/*
 * Returns the number of big-blocks available for filesystem data and undos,
 * without formatting anything.
 */
int64_t
count_freemap(struct volume_info *vol)
{
        hammer_off_t phys_offset;
        hammer_off_t vol_free_off;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;

        vol_free_off = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        if (vol->vol_no == RootVolNo)
                vol_free_off += HAMMER_BIGBLOCK_SIZE;

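        /*
         * Each HAMMER_BLOCKMAP_LAYER2-sized chunk of the volume consumes
         * one big-block for its freemap layer2 bootstrap (the root volume
         * consumes one more for layer1, accounted above), so advance
         * vol_free_off past them before counting.
         */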
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                vol_free_off += HAMMER_BIGBLOCK_SIZE;
        }

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                if (phys_offset < vol_free_off) {
                        ;
                } else if (phys_offset < vol->vol_free_end) {
                        ++count;
                }
        }

        return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;

        if (volume == NULL)
                volume = get_volume(RootVolNo);

        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                errx(1, "alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        /*
         * Update the freemap.
         */
        if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
                root_vol = get_volume(RootVolNo);
                freemap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
                layer_offset = freemap->phys_offset +
                               HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer1->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer2, 0);
                assert(layer2->zone == 0);
                layer2->zone = zone;
                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                layer2->bytes_free = 0;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                --root_vol->ondisk->vol0_stat_freebigblocks;
                root_vol->cache.modified = 1;

                rel_buffer(buffer1);
                rel_buffer(buffer2);
                rel_volume(root_vol);
        }

        rel_volume(volume);
        return(result_offset);
}

/*
 * Format the undomap for the root volume.
 */
void
format_undomap(struct volume_info *root_vol)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        struct hammer_volume_ondisk *ondisk;
        struct buffer_info *buffer = NULL;
        hammer_off_t scan;
        int n;
        int limit_index;
        u_int32_t seqno;

        /* Only root volume needs formatting */
        assert(root_vol->vol_no == RootVolNo);
        ondisk = root_vol->ondisk;

        /*
         * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 big-blocks.  Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 500MB, or approximately 1% of
         * the recommended 50G disk.
         *
         * Changing this minimum is rather dangerous as complex filesystem
         * operations can cause the UNDO FIFO to fill up otherwise.
         */
        undo_limit = UndoBufferSize;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 500*1024*1024)
                        undo_limit = 500*1024*1024;
        }
        undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
                     ~HAMMER_BIGBLOCK_MASK64;
        if (undo_limit < HAMMER_BIGBLOCK_SIZE)
                undo_limit = HAMMER_BIGBLOCK_SIZE;
        if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;
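
        /*
         * Worked example: a 100GB volume yields 100GB / 1000, about 100MB,
         * which is below the 500MB floor, so undo_limit is raised to 500MB;
         * that is already a multiple of the 8MB big-block size, so the
         * alignment step above leaves it unchanged.
         */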

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        n = 0;
        scan = blockmap->next_offset;
        limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;

        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
                                                        HAMMER_ZONE_UNDO_INDEX);
                scan += HAMMER_BIGBLOCK_SIZE;
        }
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
                ++n;
        }

        /*
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
         */
        printf("initializing the undo map (%jd MB)\n",
                (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
                (1024 * 1024));

        scan = blockmap->first_offset;
        seqno = 0;

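        /*
         * Fill the UNDO FIFO with dummy records, each HAMMER_UNDO_ALIGN
         * bytes, so the kernel sees a fully formed, empty FIFO: each
         * record carries a fifo head, a matching fifo tail and a CRC
         * over the whole record.
         */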
        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int isnew;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                bzero(head, bytes);
                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                                crc32(head + 1, bytes - sizeof(*head));

                scan += bytes;
        }
        rel_buffer(buffer);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = 0;
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->alloc_offset = HAMMER_ENCODE(zone_base, 255, -1);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t zone2_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
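        /*
         * Allocations are first rounded up to a multiple of 16 bytes,
         * which keeps layer2->append_off 16-byte aligned within the
         * big-block.
         */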
        bytes = (bytes + 15) & ~15;

again:
        if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                blockmap->next_offset = (blockmap->next_offset + bytes) &
                                        ~HAMMER_BUFMASK64;
        }

        /*
         * Dive layer 1.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * Dive layer 2, each entry represents a big-block.
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new big-block assign ownership to our
         * zone.  If the big-block is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                layer2->zone = zone;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset = (blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
                                        ~HAMMER_BIGBLOCK_MASK64;
                goto again;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        assert(layer2->append_off ==
               (blockmap->next_offset & HAMMER_BIGBLOCK_MASK));
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset &
                              HAMMER_BIGBLOCK_MASK;

        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

        zone2_offset = HAMMER_ZONE_ENCODE(zone,
                        *result_offp & ~HAMMER_OFF_ZONE_MASK);

        ptr = get_buffer_data(zone2_offset, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(volume);
        return(ptr);
}

942
943 /*
944  * Flush various tracking structures to disk
945  */
946 void
947 flush_all_volumes(void)
948 {
949         struct volume_info *vol;
950
951         TAILQ_FOREACH(vol, &VolList, entry)
952                 flush_volume(vol);
953 }
954
955 void
956 flush_volume(struct volume_info *volume)
957 {
958         struct buffer_info *buffer;
959         int i;
960
961         for (i = 0; i < HAMMER_BUFLISTS; ++i) {
962                 TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
963                         flush_buffer(buffer);
964         }
965         writehammerbuf(volume, volume->ondisk, 0);
966         volume->cache.modified = 0;
967 }
968
969 void
970 flush_buffer(struct buffer_info *buffer)
971 {
972         writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
973         buffer->cache.modified = 0;
974 }
975
976 /*
977  * Core I/O operations
978  */
979 static int
980 readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
981 {
982         ssize_t n;
983
984         n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
985         if (n != HAMMER_BUFSIZE)
986                 return(-1);
987         return(0);
988 }
989
990 static void
991 writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
992 {
993         ssize_t n;
994
995         n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
996         if (n != HAMMER_BUFSIZE)
997                 err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
998 }