sbin/hammer: Add a function find_buffer()
sbin/hammer/ondisk.c
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
                        struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume, int zone);
static void get_buffer_readahead(struct buffer_info *base);
static __inline void *get_ondisk(hammer_off_t buf_offset,
                        struct buffer_info **bufferp, int isnew);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);

int DebugOpt;

uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
int     UseReadBehind = -4;
int     UseReadAhead = 4;
int     AssertOnFailure = 1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

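/*
 * Hash a buffer offset into one of the volume's buffer list heads.
 */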
static __inline
int
buffer_hash(hammer_off_t buf_offset)
{
        int hi;

        hi = (int)(buf_offset / HAMMER_BUFSIZE) & HAMMER_BUFLISTMASK;
        return(hi);
}

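/*
 * Lookup the buffer_info structure tracking buf_offset in the volume's
 * buffer hash table.  Returns NULL if the buffer is not being tracked.
 */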
static struct buffer_info*
find_buffer(struct volume_info *volume, hammer_off_t buf_offset)
{
        int hi;
        struct buffer_info *buf;

        hi = buffer_hash(buf_offset);
        TAILQ_FOREACH(buf, &volume->buffer_lists[hi], entry)
                if (buf->buf_offset == buf_offset)
                        return(buf);
        return(NULL);
}

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int i, n;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        for (i = 0; i < HAMMER_BUFLISTS; ++i)
                TAILQ_INIT(&vol->buffer_lists[i]);
        vol->name = strdup(filename);
        vol->fd = open(filename, oflags);
        if (vol->fd < 0) {
                free(vol->name);
                free(vol);
                err(1, "setup_volume: %s: Open failed", filename);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
                if (n != HAMMER_BUFSIZE) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            filename);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew > 0) {
                /*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
                vol->cache.modified = 1;
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume: %s: Duplicate volume number %d "
                                "against %s", filename, vol_no, scan->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}

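/*
 * Lookup a volume structure by volume number.  Returns NULL if the
 * volume does not exist, otherwise returns the volume with an added
 * reference.
 */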
struct volume_info *
test_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                return(NULL);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

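/*
 * Lookup a volume structure by volume number.  Unlike test_volume(),
 * a missing volume is a fatal error.
 */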
struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                errx(1, "get_volume: Volume %d does not exist!", vol_no);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

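/*
 * Release a volume reference obtained via test_volume() or get_volume().
 */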
void
rel_volume(struct volume_info *volume)
{
        if (volume == NULL)
                return;
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_off_t orig_offset = buf_offset;
        int vol_no;
        int zone;
        int hi, n;
        int dora = 0;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL, NULL);
        }
        if (buf_offset == HAMMER_OFF_BAD)
                return(NULL);

        if (AssertOnFailure) {
                assert((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                       HAMMER_ZONE_RAW_BUFFER);
        }
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = test_volume(vol_no);
        if (volume == NULL) {
                if (AssertOnFailure)
                        errx(1, "get_buffer: Volume %d not found!", vol_no);
                return(NULL);
        }

        buf_offset &= ~HAMMER_BUFMASK64;
        buf = find_buffer(volume, buf_offset);

        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                buf->buf_offset = buf_offset;
                buf->raw_offset = volume->ondisk->vol_buf_beg +
                                  (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                hi = buffer_hash(buf_offset);
                TAILQ_INSERT_TAIL(&volume->buffer_lists[hi], buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
                dora = (isnew == 0);
                if (isnew < 0)
                        buf->flags |= HAMMER_BUFINFO_READAHEAD;
        } else {
                if (DebugOpt) {
                        fprintf(stderr, "get_buffer: %016llx %016llx at %p *\n",
                                (long long)orig_offset, (long long)buf_offset,
                                buf);
                }
                if (isnew >= 0) {
                        buf->flags &= ~HAMMER_BUFINFO_READAHEAD;
                        hammer_cache_used(&buf->cache);
                }
                ++buf->use_count;
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew <= 0) {
                        n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
                                  buf->raw_offset);
                        if (n != HAMMER_BUFSIZE) {
                                if (AssertOnFailure)
                                        err(1, "get_buffer: %s:%016llx "
                                            "Read failed at offset %016llx",
                                            volume->name,
                                            (long long)buf->buf_offset,
                                            (long long)buf->raw_offset);
                                bzero(ondisk, HAMMER_BUFSIZE);
                        }
                }
        }
        if (isnew > 0) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        if (dora)
                get_buffer_readahead(buf);
        return(buf);
}

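/*
 * Issue read-behind/read-ahead for the buffers surrounding the base
 * buffer, from UseReadBehind through UseReadAhead buffers away, skipping
 * offsets outside the volume and buffers that are already tracked.
 */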
static void
get_buffer_readahead(struct buffer_info *base)
{
        struct buffer_info *buf;
        struct volume_info *vol;
        hammer_off_t buf_offset;
        int64_t raw_offset;
        int ri = UseReadBehind;
        int re = UseReadAhead;

        raw_offset = base->raw_offset + ri * HAMMER_BUFSIZE;
        vol = base->volume;

        while (ri < re) {
                if (raw_offset >= vol->ondisk->vol_buf_end)
                        break;
                if (raw_offset < vol->ondisk->vol_buf_beg || ri == 0) {
                        ++ri;
                        raw_offset += HAMMER_BUFSIZE;
                        continue;
                }
                buf_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no,
                        raw_offset - vol->ondisk->vol_buf_beg);
                buf = find_buffer(vol, buf_offset);
                if (buf == NULL) {
                        buf = get_buffer(buf_offset, -1);
                        rel_buffer(buf);
                }
                ++ri;
                raw_offset += HAMMER_BUFSIZE;
        }
}

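/*
 * Release a buffer reference.  When the last reference goes away and
 * the hammer cache has scheduled the buffer for deletion, flush any
 * modifications and free the buffer.
 */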
void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;
        int hi;

        if (buffer == NULL)
                return;
        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        hi = buffer_hash(buffer->buf_offset);
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_lists[hi], buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

/*
 * Retrieve a pointer to buffer data given a buffer offset.  The buffer
 * referenced by *bufferp is released if isnew is set or the offset is
 * out of range of the cached data, in which case a new referenced
 * buffer is loaded into *bufferp.
 */
void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        if (*bufferp != NULL) {
                if (isnew > 0 ||
                    (((*bufferp)->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(*bufferp);
                        *bufferp = NULL;
                }
        }
        return(get_ondisk(buf_offset, bufferp, isnew));
}

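/*
 * Typical usage sketch: callers keep a persistent buffer cursor so that
 * consecutive lookups which land in the same buffer reuse the reference:
 *
 *      struct buffer_info *buffer = NULL;
 *      void *ptr;
 *
 *      ptr = get_buffer_data(buf_offset, &buffer, 0);
 *      ... more get_buffer_data() calls against &buffer ...
 *      rel_buffer(buffer);
 */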
/*
 * Retrieve a pointer to a B-Tree node given a node offset.  The buffer
 * referenced by *bufferp is released if non-NULL and a new referenced
 * buffer is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufferp)
{
        if (*bufferp != NULL) {
                rel_buffer(*bufferp);
                *bufferp = NULL;
        }
        return(get_ondisk(node_offset, bufferp, 0));
}

/*
 * Return a pointer to buffer data given a buffer offset.  If *bufferp
 * is NULL the buffer is acquired, otherwise the existing buffer is used.
 */
static __inline
void *
get_ondisk(hammer_off_t buf_offset, struct buffer_info **bufferp,
        int isnew)
{
        struct buffer_info *buffer;

        buffer = *bufferp;
        if (buffer == NULL) {
                buffer = *bufferp = get_buffer(buf_offset, isnew);
                if (buffer == NULL)
                        return(NULL);
        }

        return((char *)buffer->ondisk +
                ((int32_t)buf_offset & HAMMER_BUFMASK));
}

/*
 * Allocate HAMMER elements - btree nodes, meta data, data storage
 */
void *
alloc_btree_element(hammer_off_t *offp,
                    struct buffer_info **data_bufferp)
{
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, data_bufferp);
        bzero(node, sizeof(*node));
        return (node);
}

void *
alloc_meta_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        data = alloc_blockmap(HAMMER_ZONE_META_INDEX, data_len,
                              offp, data_bufferp);
        bzero(data, data_len);
        return (data);
}

void *
alloc_data_element(hammer_off_t *offp, int32_t data_len,
                   struct buffer_info **data_bufferp)
{
        void *data;

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
                data = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else if (data_len) {
                data = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                      offp, data_bufferp);
                bzero(data, data_len);
        } else {
                data = NULL;
        }
        return (data);
}

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL.  The
 * initialize_freemap() code will load each volume's freemap.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        layer1_offset = alloc_bigblock(root_vol, HAMMER_ZONE_FREEMAP_INDEX);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->blocks_free = 0;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        blockmap->phys_offset = layer1_offset;
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->reserved01 = 0;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;
        int modified1 = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the bigblocks required
         * to implement layer2.   This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        layer1_base = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol,
                                                HAMMER_ZONE_FREEMAP_INDEX);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BIGBLOCK_SIZE) {
                modified1 = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

                layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                bzero(layer2, sizeof(*layer2));
                if (phys_offset < vol->vol_free_off) {
                        /*
                         * Fixups XXX - bigblocks already allocated as part
                         * of the freemap bootstrap.
                         */
                        if (layer2->zone == 0) {
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }
                } else if (phys_offset < vol->vol_free_end) {
                        ++layer1->blocks_free;
                        buffer1->cache.modified = 1;
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                        ++count;
                        modified1 = 1;
                } else {
                        layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                        layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                        layer2->bytes_free = 0;
                }
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                /*
                 * Finish-up layer 1
                 */
                if (modified1) {
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        buffer1->cache.modified = 1;
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off.
 *
 * If the zone is HAMMER_ZONE_FREEMAP_INDEX we are bootstrapping the freemap
 * itself and cannot update it yet.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, int zone)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;

        if (volume == NULL)
                volume = get_volume(RootVolNo);

        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                panic("alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_BIGBLOCK_SIZE;

        /*
         * Update the freemap.
         */
        if (zone != HAMMER_ZONE_FREEMAP_INDEX) {
                root_vol = get_volume(RootVolNo);
                layer_offset = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
                layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer1, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer1->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer2, 0);
                assert(layer2->zone == 0);
                layer2->zone = zone;
                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                layer2->bytes_free = 0;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                --root_vol->ondisk->vol0_stat_freebigblocks;
                root_vol->cache.modified = 1;

                rel_buffer(buffer1);
                rel_buffer(buffer2);
                rel_volume(root_vol);
        }

        rel_volume(volume);
        return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        struct buffer_info *buffer = NULL;
        hammer_off_t scan;
        int n;
        int limit_index;
        u_int32_t seqno;

        /*
         * Size the undo buffer in multiples of HAMMER_BIGBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 big blocks.  Size to approximately
         * 0.1% of the disk.
         *
         * The minimum UNDO fifo size is 500MB, or approximately 1% of
         * the recommended 50G disk.
         *
         * Changing this minimum is rather dangerous as complex filesystem
         * operations can cause the UNDO FIFO to fill up otherwise.
         */
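        /*
         * Worked example, assuming the recommended 50G volume: 50G / 1000
         * is roughly 50MB, which falls below the floor, so the UNDO FIFO
         * is sized to the 500MB minimum (about 1% of the volume) and then
         * rounded to a HAMMER_BIGBLOCK_SIZE boundary.
         */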
        undo_limit = UndoBufferSize;
        if (undo_limit == 0) {
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
                if (undo_limit < 500*1024*1024)
                        undo_limit = 500*1024*1024;
        }
        undo_limit = (undo_limit + HAMMER_BIGBLOCK_MASK64) &
                     ~HAMMER_BIGBLOCK_MASK64;
        if (undo_limit < HAMMER_BIGBLOCK_SIZE)
                undo_limit = HAMMER_BIGBLOCK_SIZE;
        if (undo_limit > HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_BIGBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        n = 0;
        scan = blockmap->next_offset;
        limit_index = undo_limit / HAMMER_BIGBLOCK_SIZE;

        assert(limit_index <= HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                ondisk->vol0_undo_array[n] = alloc_bigblock(NULL,
                                                        HAMMER_ZONE_UNDO_INDEX);
                scan += HAMMER_BIGBLOCK_SIZE;
        }
        while (n < HAMMER_UNDO_LAYER2) {
                ondisk->vol0_undo_array[n] = HAMMER_BLOCKMAP_UNAVAIL;
                ++n;
        }

        /*
         * Pre-initialize the UNDO blocks (HAMMER version 4+)
         */
        printf("initializing the undo map (%jd MB)\n",
                (intmax_t)(blockmap->alloc_offset & HAMMER_OFF_LONG_MASK) /
                (1024 * 1024));

        scan = blockmap->first_offset;
        seqno = 0;

        while (scan < blockmap->alloc_offset) {
                hammer_fifo_head_t head;
                hammer_fifo_tail_t tail;
                int isnew;
                int bytes = HAMMER_UNDO_ALIGN;

                isnew = ((scan & HAMMER_BUFMASK64) == 0);
                head = get_buffer_data(scan, &buffer, isnew);
                buffer->cache.modified = 1;
                tail = (void *)((char *)head + bytes - sizeof(*tail));

                bzero(head, bytes);
                head->hdr_signature = HAMMER_HEAD_SIGNATURE;
                head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
                head->hdr_size = bytes;
                head->hdr_seq = seqno++;

                tail->tail_signature = HAMMER_TAIL_SIGNATURE;
                tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
                tail->tail_size = bytes;

                head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
                                crc32(head + 1, bytes - sizeof(*head));

                scan += bytes;
        }
        rel_buffer(buffer);
}

/*
 * Format a new blockmap.  This is mostly a degenerate case because
 * all allocations are now actually done from the freemap.
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_base)
{
        blockmap->phys_offset = 0;
        blockmap->alloc_offset = zone_base | HAMMER_VOL_ENCODE(255) |
                                 HAMMER_SHORT_OFF_ENCODE(-1);
        blockmap->first_offset = zone_base;
        blockmap->next_offset = zone_base;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

/*
 * Allocate a chunk of data out of a blockmap.  This is a simplified
 * version which uses next_offset as a simple allocation iterator.
 */
static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t blockmap;
        hammer_blockmap_t freemap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t zone2_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        blockmap = &volume->ondisk->vol0_blockmap[zone];
        freemap = &volume->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary we have to skip to the next buffer.
         */
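        /*
         * e.g. a 40 byte request is rounded up to 48 bytes; if those
         * bytes would straddle a HAMMER_BUFSIZE boundary, next_offset
         * is advanced to the start of the next buffer before allocating.
         */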
        bytes = (bytes + 15) & ~15;

again:
        if ((blockmap->next_offset ^ (blockmap->next_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                blockmap->next_offset = (blockmap->next_offset + bytes) &
                                        ~HAMMER_BUFMASK64;
        }

        /*
         * Dive layer 1.  For now we can't allocate data outside of volume 0.
         */
        layer1_offset = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(blockmap->next_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

        if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * Dive layer 2
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(blockmap->next_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                fprintf(stderr, "alloc_blockmap: ran out of space!\n");
                exit(1);
        }

        /*
         * If we are entering a new bigblock assign ownership to our
         * zone.  If the bigblock is owned by another zone skip it.
         */
        if (layer2->zone == 0) {
                --layer1->blocks_free;
                layer2->zone = zone;
                assert(layer2->bytes_free == HAMMER_BIGBLOCK_SIZE);
                assert(layer2->append_off == 0);
        }
        if (layer2->zone != zone) {
                blockmap->next_offset = (blockmap->next_offset + HAMMER_BIGBLOCK_SIZE) &
                                        ~HAMMER_BIGBLOCK_MASK64;
                goto again;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        assert(layer2->append_off ==
               (blockmap->next_offset & HAMMER_BIGBLOCK_MASK));
        layer2->bytes_free -= bytes;
        *result_offp = blockmap->next_offset;
        blockmap->next_offset += bytes;
        layer2->append_off = (int)blockmap->next_offset &
                              HAMMER_BIGBLOCK_MASK;

        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

        zone2_offset = HAMMER_ZONE_ENCODE(zone,
                        *result_offp & ~HAMMER_OFF_ZONE_MASK);

        ptr = get_buffer_data(zone2_offset, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(volume);
        return(ptr);
}

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;
        int i;

        for (i = 0; i < HAMMER_BUFLISTS; ++i) {
                TAILQ_FOREACH(buffer, &volume->buffer_lists[i], entry)
                        flush_buffer(buffer);
        }
        writehammerbuf(volume, volume->ondisk, 0);
        volume->cache.modified = 0;
}

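/*
 * Write a buffer's in-memory image back to its volume and mark it clean.
 */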
void
flush_buffer(struct buffer_info *buffer)
{
        writehammerbuf(buffer->volume, buffer->ondisk, buffer->raw_offset);
        buffer->cache.modified = 0;
}

#if 0
/*
 * Generic buffer initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = 0;
        head->hdr_crc = 0;
        head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

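/*
 * Print a formatted error message to stderr and exit.
 */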
void
panic(const char *ctl, ...)
{
        va_list va;

        va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        va_end(va);
        fprintf(stderr, "\n");
        exit(1);
}