/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sbin/hammer/ondisk.c,v 1.16 2008/05/05 20:34:52 dillon Exp $
 */

#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <err.h>
#include <fcntl.h>
#include "hammer_util.h"

static void *alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
                        struct buffer_info **bufferp);
static hammer_off_t alloc_bigblock(struct volume_info *volume,
                        hammer_off_t owner);
#if 0
static void init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type);
static hammer_off_t hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
                        struct buffer_info **bufp, u_int16_t hdr_type);
static void readhammerbuf(struct volume_info *vol, void *data,
                        int64_t offset);
#endif
static void writehammerbuf(struct volume_info *vol, const void *data,
                        int64_t offset);


uuid_t Hammer_FSType;
uuid_t Hammer_FSId;
int64_t BootAreaSize;
int64_t MemAreaSize;
int64_t UndoBufferSize;
int     UsingSuperClusters;
int     NumVolumes;
int     RootVolNo = -1;
struct volume_list VolList = TAILQ_HEAD_INITIALIZER(VolList);

/*
 * Lookup the requested information structure and related on-disk buffer.
 * Missing structures are created.
 */
struct volume_info *
setup_volume(int32_t vol_no, const char *filename, int isnew, int oflags)
{
        struct volume_info *vol;
        struct volume_info *scan;
        struct hammer_volume_ondisk *ondisk;
        int n;

        /*
         * Allocate the volume structure
         */
        vol = malloc(sizeof(*vol));
        bzero(vol, sizeof(*vol));
        TAILQ_INIT(&vol->buffer_list);
        vol->name = strdup(filename);
        vol->fd = open(filename, oflags);
        if (vol->fd < 0) {
                free(vol->name);
                free(vol);
                err(1, "setup_volume: %s: Open failed", filename);
        }

        /*
         * Read or initialize the volume header
         */
        vol->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
        if (isnew) {
                bzero(ondisk, HAMMER_BUFSIZE);
        } else {
                n = pread(vol->fd, ondisk, HAMMER_BUFSIZE, 0);
                if (n != HAMMER_BUFSIZE) {
                        err(1, "setup_volume: %s: Read failed at offset 0",
                            filename);
                }
                vol_no = ondisk->vol_no;
                if (RootVolNo < 0) {
                        RootVolNo = ondisk->vol_rootvol;
                } else if (RootVolNo != (int)ondisk->vol_rootvol) {
                        errx(1, "setup_volume: %s: root volume disagreement: "
                                "%d vs %d",
                                vol->name, RootVolNo, ondisk->vol_rootvol);
                }

                if (bcmp(&Hammer_FSType, &ondisk->vol_fstype, sizeof(Hammer_FSType)) != 0) {
                        errx(1, "setup_volume: %s: Header does not indicate "
                                "that this is a hammer volume", vol->name);
                }
                if (TAILQ_EMPTY(&VolList)) {
                        Hammer_FSId = vol->ondisk->vol_fsid;
                } else if (bcmp(&Hammer_FSId, &ondisk->vol_fsid, sizeof(Hammer_FSId)) != 0) {
                        errx(1, "setup_volume: %s: FSId does not match other "
                                "volumes!", vol->name);
                }
        }
        vol->vol_no = vol_no;

        if (isnew) {
                /*init_fifo_head(&ondisk->head, HAMMER_HEAD_TYPE_VOL);*/
                vol->cache.modified = 1;
        }

        /*
         * Link the volume structure in
         */
        TAILQ_FOREACH(scan, &VolList, entry) {
                if (scan->vol_no == vol_no) {
                        errx(1, "setup_volume: %s: Duplicate volume number %d "
                                "against %s", filename, vol_no, scan->name);
                }
        }
        TAILQ_INSERT_TAIL(&VolList, vol, entry);
        return(vol);
}
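
/*
 * Illustrative sketch (not part of the original file): a front-end such
 * as newfs_hammer would typically bring its volume set up by opening
 * each volume and then taking a reference on the root volume recorded
 * in the headers.  The variable names and flags below are assumptions
 * for illustration only.
 *
 *      for (i = 0; i < nvols; ++i)
 *              setup_volume(i, volpaths[i], 0, O_RDWR);
 *      root_vol = get_volume(RootVolNo);
 *      ...
 *      rel_volume(root_vol);
 */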

struct volume_info *
get_volume(int32_t vol_no)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry) {
                if (vol->vol_no == vol_no)
                        break;
        }
        if (vol == NULL)
                errx(1, "get_volume: Volume %d does not exist!", vol_no);
        ++vol->cache.refs;
        /* not added to or removed from hammer cache */
        return(vol);
}

void
rel_volume(struct volume_info *volume)
{
        /* not added to or removed from hammer cache */
        --volume->cache.refs;
}

/*
 * Acquire the specified buffer.
 */
struct buffer_info *
get_buffer(hammer_off_t buf_offset, int isnew)
{
        void *ondisk;
        struct buffer_info *buf;
        struct volume_info *volume;
        int vol_no;
        int zone;
        int n;

        zone = HAMMER_ZONE_DECODE(buf_offset);
        if (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) {
                buf_offset = blockmap_lookup(buf_offset, NULL, NULL);
        }
        assert((buf_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(buf_offset);
        volume = get_volume(vol_no);
        buf_offset &= ~HAMMER_BUFMASK64;

        TAILQ_FOREACH(buf, &volume->buffer_list, entry) {
                if (buf->buf_offset == buf_offset)
                        break;
        }
        if (buf == NULL) {
                buf = malloc(sizeof(*buf));
                bzero(buf, sizeof(*buf));
                buf->buf_offset = buf_offset;
                buf->buf_disk_offset = volume->ondisk->vol_buf_beg +
                                        (buf_offset & HAMMER_OFF_SHORT_MASK);
                buf->volume = volume;
                TAILQ_INSERT_TAIL(&volume->buffer_list, buf, entry);
                ++volume->cache.refs;
                buf->cache.u.buffer = buf;
                hammer_cache_add(&buf->cache, ISBUFFER);
        }
        ++buf->cache.refs;
        hammer_cache_flush();
        if ((ondisk = buf->ondisk) == NULL) {
                buf->ondisk = ondisk = malloc(HAMMER_BUFSIZE);
                if (isnew == 0) {
                        n = pread(volume->fd, ondisk, HAMMER_BUFSIZE,
                                  buf->buf_disk_offset);
                        if (n != HAMMER_BUFSIZE) {
                                err(1, "get_buffer: %s:%016llx Read failed at "
                                       "offset %lld",
                                    volume->name, buf->buf_offset,
                                    buf->buf_disk_offset);
                        }
                }
        }
        if (isnew) {
                bzero(ondisk, HAMMER_BUFSIZE);
                buf->cache.modified = 1;
        }
        return(buf);
}
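
/*
 * Note on get_buffer(): offsets handed in from a translated zone
 * (zone > HAMMER_ZONE_RAW_BUFFER_INDEX) are first run through
 * blockmap_lookup() to obtain the raw buffer offset; the volume number
 * and buffer-aligned offset are then decoded from that.  Buffers are
 * reference counted, so every get_buffer() must eventually be paired
 * with a rel_buffer().
 */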

void
rel_buffer(struct buffer_info *buffer)
{
        struct volume_info *volume;

        assert(buffer->cache.refs > 0);
        if (--buffer->cache.refs == 0) {
                if (buffer->cache.delete) {
                        volume = buffer->volume;
                        if (buffer->cache.modified)
                                flush_buffer(buffer);
                        TAILQ_REMOVE(&volume->buffer_list, buffer, entry);
                        hammer_cache_del(&buffer->cache);
                        free(buffer->ondisk);
                        free(buffer);
                        rel_volume(volume);
                }
        }
}

void *
get_buffer_data(hammer_off_t buf_offset, struct buffer_info **bufferp,
                int isnew)
{
        struct buffer_info *buffer;

        if ((buffer = *bufferp) != NULL) {
                if (isnew ||
                    ((buffer->buf_offset ^ buf_offset) & ~HAMMER_BUFMASK64)) {
                        rel_buffer(buffer);
                        buffer = *bufferp = NULL;
                }
        }
        if (buffer == NULL)
                buffer = *bufferp = get_buffer(buf_offset, isnew);
        return((char *)buffer->ondisk + ((int32_t)buf_offset & HAMMER_BUFMASK));
}
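
/*
 * Typical calling idiom for get_buffer_data(), as used by the freemap
 * code below (a sketch, not part of the original file): the caller
 * carries a cached buffer pointer across calls so consecutive lookups
 * falling within the same HAMMER_BUFSIZE buffer reuse the reference,
 * then releases whatever is left over at the end.
 *
 *      struct buffer_info *buffer = NULL;
 *      void *ptr;
 *
 *      ptr = get_buffer_data(offset1, &buffer, 0);
 *      ptr = get_buffer_data(offset2, &buffer, 0);  -- may reuse buffer
 *      if (buffer)
 *              rel_buffer(buffer);
 */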

/*
 * Retrieve a pointer to a B-Tree node given its zone offset.  If *bufp is
 * non-NULL the buffer it references is released, and a referenced buffer
 * backing the node is loaded into it.
 */
hammer_node_ondisk_t
get_node(hammer_off_t node_offset, struct buffer_info **bufp)
{
        struct buffer_info *buf;

        if (*bufp)
                rel_buffer(*bufp);
        *bufp = buf = get_buffer(node_offset, 0);
        return((void *)((char *)buf->ondisk +
                        (int32_t)(node_offset & HAMMER_BUFMASK)));
}

/*
 * Allocate HAMMER elements - B-Tree nodes, data storage, and record elements.
 *
 * NOTE: alloc_blockmap() does not zero the storage it hands back, so the
 * allocators below bzero() each element themselves.
 */
void *
alloc_btree_element(hammer_off_t *offp)
{
        struct buffer_info *buffer = NULL;
        hammer_node_ondisk_t node;

        node = alloc_blockmap(HAMMER_ZONE_BTREE_INDEX, sizeof(*node),
                              offp, &buffer);
        bzero(node, sizeof(*node));
        /* XXX buffer not released, pointer remains valid */
        return(node);
}

hammer_record_ondisk_t
alloc_record_element(hammer_off_t *offp, int32_t data_len, void **datap)
{
        struct buffer_info *record_buffer = NULL;
        struct buffer_info *data_buffer = NULL;
        hammer_record_ondisk_t rec;

        rec = alloc_blockmap(HAMMER_ZONE_RECORD_INDEX, sizeof(*rec),
                             offp, &record_buffer);
        bzero(rec, sizeof(*rec));

        if (data_len >= HAMMER_BUFSIZE) {
                assert(data_len <= HAMMER_BUFSIZE); /* just one buffer */
                *datap = alloc_blockmap(HAMMER_ZONE_LARGE_DATA_INDEX, data_len,
                                        &rec->base.data_off, &data_buffer);
                rec->base.data_len = data_len;
                bzero(*datap, data_len);
        } else if (data_len) {
                *datap = alloc_blockmap(HAMMER_ZONE_SMALL_DATA_INDEX, data_len,
                                        &rec->base.data_off, &data_buffer);
                rec->base.data_len = data_len;
                bzero(*datap, data_len);
        } else {
                *datap = NULL;
        }
        /* XXX buf not released, ptr remains valid */
        return(rec);
}
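
/*
 * Illustrative sketch (assumed caller, hypothetical names): allocating
 * a record with a small data payload.  alloc_record_element() fills in
 * rec->base.data_off and rec->base.data_len and zeroes the data, so the
 * caller only copies its payload into place:
 *
 *      hammer_off_t rec_off;
 *      hammer_record_ondisk_t rec;
 *      void *data;
 *
 *      rec = alloc_record_element(&rec_off, data_len, &data);
 *      if (data_len)
 *              bcopy(payload, data, data_len);
 */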

/*
 * Format a new freemap.  Set all layer1 entries to UNAVAIL;
 * initialize_freemap() will then load each volume's free space into it.
 */
void
format_freemap(struct volume_info *root_vol, hammer_blockmap_t blockmap)
{
        struct buffer_info *buffer = NULL;
        hammer_off_t layer1_offset;
        struct hammer_blockmap_layer1 *layer1;
        int i, isnew;

        layer1_offset = alloc_bigblock(root_vol, 0);
        for (i = 0; i < (int)HAMMER_BLOCKMAP_RADIX1; ++i) {
                isnew = ((i % HAMMER_BLOCKMAP_RADIX1_PERBUFFER) == 0);
                layer1 = get_buffer_data(layer1_offset + i * sizeof(*layer1),
                                         &buffer, isnew);
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        }
        rel_buffer(buffer);

        blockmap = &root_vol->ondisk->vol0_blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        blockmap->phys_offset = layer1_offset;
        blockmap->alloc_offset = HAMMER_ENCODE_RAW_BUFFER(255, -1);
        blockmap->next_offset = HAMMER_ENCODE_RAW_BUFFER(0, 0);
        blockmap->reserved01 = 0;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        root_vol->cache.modified = 1;
}

/*
 * Load the volume's remaining free space into the freemap.  If this is
 * the root volume, initialize the freemap owner for the layer1 bigblock.
 *
 * Returns the number of bigblocks available.
 */
int64_t
initialize_freemap(struct volume_info *vol)
{
        struct volume_info *root_vol;
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_base;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t phys_offset;
        hammer_off_t aligned_vol_free_end;
        int64_t count = 0;
        int modified1 = 0;

        root_vol = get_volume(RootVolNo);
        aligned_vol_free_end = (vol->vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                                & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        printf("initialize freemap volume %d\n", vol->vol_no);

        /*
         * Initialize the freemap.  First preallocate the bigblocks required
         * to implement layer2.  This preallocation is a bootstrap allocation
         * using blocks from the target volume.
         */
        layer1_base = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        layer1->phys_offset = alloc_bigblock(vol, 0);
                        layer1->blocks_free = 0;
                        buffer1->cache.modified = 1;
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                }
        }

        /*
         * Now fill everything in.
         */
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(vol->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_LARGEBLOCK_SIZE) {
                modified1 = 0;
                layer1_offset = layer1_base +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = get_buffer_data(layer1_offset, &buffer1, 0);

                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                layer2_offset = layer1->phys_offset +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(phys_offset);

                layer2 = get_buffer_data(layer2_offset, &buffer2, 0);
                if (phys_offset < vol->vol_free_off) {
                        /*
                         * Fixups XXX - bigblocks already allocated as part
                         * of the freemap bootstrap.
                         */
                        layer2->u.owner = HAMMER_ENCODE_FREEMAP(0, 0); /* XXX */
                } else if (phys_offset < vol->vol_free_end) {
                        ++layer1->blocks_free;
                        buffer1->cache.modified = 1;
                        layer2->u.owner = HAMMER_BLOCKMAP_FREE;
                        ++count;
                        modified1 = 1;
                } else {
                        layer2->u.owner = HAMMER_BLOCKMAP_UNAVAIL;
                }
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer2->cache.modified = 1;

                /*
                 * Finish-up layer 1
                 */
                if (modified1) {
                        layer1->layer1_crc = crc32(layer1,
                                                   HAMMER_LAYER1_CRCSIZE);
                        buffer1->cache.modified = 1;
                }
        }
        rel_buffer(buffer1);
        rel_buffer(buffer2);
        rel_volume(root_vol);
        return(count);
}

/*
 * Allocate big-blocks using our poor-man's volume->vol_free_off and
 * update the freemap if owner != 0.
 */
hammer_off_t
alloc_bigblock(struct volume_info *volume, hammer_off_t owner)
{
        struct buffer_info *buffer = NULL;
        struct volume_info *root_vol;
        hammer_off_t result_offset;
        hammer_off_t layer_offset;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        int didget;

        if (volume == NULL) {
                volume = get_volume(RootVolNo);
                didget = 1;
        } else {
                didget = 0;
        }
        result_offset = volume->vol_free_off;
        if (result_offset >= volume->vol_free_end)
                panic("alloc_bigblock: Ran out of room, filesystem too small");
        volume->vol_free_off += HAMMER_LARGEBLOCK_SIZE;

        /*
         * Update the freemap
         */
        if (owner) {
                root_vol = get_volume(RootVolNo);
                layer_offset = root_vol->ondisk->vol0_blockmap[
                                        HAMMER_ZONE_FREEMAP_INDEX].phys_offset;
                layer_offset += HAMMER_BLOCKMAP_LAYER1_OFFSET(result_offset);
                layer1 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
                --layer1->blocks_free;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                buffer->cache.modified = 1;
                layer_offset = layer1->phys_offset +
                               HAMMER_BLOCKMAP_LAYER2_OFFSET(result_offset);
                layer2 = get_buffer_data(layer_offset, &buffer, 0);
                assert(layer2->u.owner == HAMMER_BLOCKMAP_FREE);
                layer2->u.owner = owner;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                buffer->cache.modified = 1;

                rel_buffer(buffer);
                rel_volume(root_vol);
        }

        if (didget)
                rel_volume(volume);
        return(result_offset);
}

/*
 * Format the undo-map for the root volume.
 */
void
format_undomap(hammer_volume_ondisk_t ondisk)
{
        const int undo_zone = HAMMER_ZONE_UNDO_INDEX;
        hammer_off_t undo_limit;
        hammer_blockmap_t blockmap;
        hammer_off_t scan;
        struct hammer_blockmap_layer2 *layer2;
        int n;
        int limit_index;

        /*
         * Size the undo buffer in multiples of HAMMER_LARGEBLOCK_SIZE,
         * up to HAMMER_UNDO_LAYER2 large blocks.  Size to approximately
         * 0.1% of the disk.
         */
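        /*
         * Worked example of the sizing below, assuming the canonical
         * 8MB HAMMER_LARGEBLOCK_SIZE: a 100GB volume defaults to
         * 100,000,000 bytes (0.1%), which rounds up to 12 large blocks,
         * i.e. a 100,663,296 byte (96MB) undo FIFO.
         */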
        undo_limit = UndoBufferSize;
        if (undo_limit == 0)
                undo_limit = (ondisk->vol_buf_end - ondisk->vol_buf_beg) / 1000;
        undo_limit = (undo_limit + HAMMER_LARGEBLOCK_MASK64) &
                     ~HAMMER_LARGEBLOCK_MASK64;
        if (undo_limit < HAMMER_LARGEBLOCK_SIZE)
                undo_limit = HAMMER_LARGEBLOCK_SIZE;
        if (undo_limit > HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2)
                undo_limit = HAMMER_LARGEBLOCK_SIZE * HAMMER_UNDO_LAYER2;
        UndoBufferSize = undo_limit;

        blockmap = &ondisk->vol0_blockmap[undo_zone];
        bzero(blockmap, sizeof(*blockmap));
        blockmap->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
        blockmap->first_offset = HAMMER_ZONE_ENCODE(undo_zone, 0);
        blockmap->next_offset = blockmap->first_offset;
        blockmap->alloc_offset = HAMMER_ZONE_ENCODE(undo_zone, undo_limit);
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);

        layer2 = &ondisk->vol0_undo_array[0];
        n = 0;
        scan = blockmap->next_offset;
        limit_index = undo_limit / HAMMER_LARGEBLOCK_SIZE;

        assert(limit_index < HAMMER_UNDO_LAYER2);

        for (n = 0; n < limit_index; ++n) {
                layer2->u.phys_offset = alloc_bigblock(NULL, scan);
                layer2->bytes_free = -1;        /* not used */
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

                scan += HAMMER_LARGEBLOCK_SIZE;
                ++layer2;
        }
        while (n < HAMMER_UNDO_LAYER2) {
                layer2->u.phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer2->bytes_free = -1;
                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                ++layer2;
                ++n;
        }
}
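
/*
 * Note on format_undomap(): unlike the other zones, the undo zone's
 * layer2 array is stored directly in the volume header
 * (vol0_undo_array) rather than in a blockmap bigblock.  The first
 * limit_index entries are backed by freshly allocated bigblocks owned
 * by the undo zone; the remaining entries are marked UNAVAIL.
 */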

/*
 * Format a new blockmap.  Set the owner to the base of the blockmap
 * (meaning either the blockmap layer1 bigblock, layer2 bigblock, or
 * target bigblock).
 */
void
format_blockmap(hammer_blockmap_t blockmap, hammer_off_t zone_off)
{
        blockmap->phys_offset = alloc_bigblock(NULL, zone_off);
        blockmap->alloc_offset = zone_off;
        blockmap->first_offset = zone_off;
        blockmap->next_offset = zone_off;
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

static
void *
alloc_blockmap(int zone, int bytes, hammer_off_t *result_offp,
               struct buffer_info **bufferp)
{
        struct buffer_info *buffer1 = NULL;
        struct buffer_info *buffer2 = NULL;
        struct volume_info *volume;
        hammer_blockmap_t rootmap;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t bigblock_offset;
        void *ptr;

        volume = get_volume(RootVolNo);

        rootmap = &volume->ondisk->vol0_blockmap[zone];

        /*
         * Alignment and buffer-boundary issues.  If the allocation would
         * cross a buffer boundary, skip to the next buffer.  The rest of
         * this function tracks the allocation cursor in alloc_offset.
         */
        bytes = (bytes + 7) & ~7;
        if ((rootmap->alloc_offset ^ (rootmap->alloc_offset + bytes - 1)) &
            ~HAMMER_BUFMASK64) {
                volume->cache.modified = 1;
                rootmap->alloc_offset = (rootmap->alloc_offset + bytes) &
                                        ~HAMMER_BUFMASK64;
        }

        /*
         * Dive layer 1
         */
        layer1_offset = rootmap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(rootmap->alloc_offset);

        layer1 = get_buffer_data(layer1_offset, &buffer1, 0);
        if ((rootmap->alloc_offset & HAMMER_BLOCKMAP_LAYER2_MASK) == 0) {
                buffer1->cache.modified = 1;
                bzero(layer1, sizeof(*layer1));
                layer1->blocks_free = HAMMER_BLOCKMAP_RADIX2;
                layer1->phys_offset = alloc_bigblock(NULL,
                                                     rootmap->alloc_offset);
        }

        /*
         * Dive layer 2
         */
        layer2_offset = layer1->phys_offset +
                        HAMMER_BLOCKMAP_LAYER2_OFFSET(rootmap->alloc_offset);

        layer2 = get_buffer_data(layer2_offset, &buffer2, 0);

        if ((rootmap->alloc_offset & HAMMER_LARGEBLOCK_MASK64) == 0) {
                buffer2->cache.modified = 1;
                bzero(layer2, sizeof(*layer2));
                layer2->u.phys_offset = alloc_bigblock(NULL,
                                                       rootmap->alloc_offset);
                layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                --layer1->blocks_free;
        }

        buffer1->cache.modified = 1;
        buffer2->cache.modified = 1;
        volume->cache.modified = 1;
        layer2->bytes_free -= bytes;
        *result_offp = rootmap->alloc_offset;
        rootmap->alloc_offset += bytes;
        rootmap->next_offset = rootmap->alloc_offset;

        bigblock_offset = layer2->u.phys_offset +
                          (*result_offp & HAMMER_LARGEBLOCK_MASK);

        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

        ptr = get_buffer_data(bigblock_offset, bufferp, 0);
        (*bufferp)->cache.modified = 1;

        if (buffer1)
                rel_buffer(buffer1);
        if (buffer2)
                rel_buffer(buffer2);

        rel_volume(volume);
        return(ptr);
}
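
/*
 * Note on alloc_blockmap(): rootmap->alloc_offset acts as a simple
 * linear allocation cursor for the zone.  Each allocation dives the
 * two-level radix tree: a layer1 entry covers HAMMER_BLOCKMAP_LAYER2
 * bytes of zone address space and points at a layer2 bigblock, whose
 * entries each describe one HAMMER_LARGEBLOCK_SIZE bigblock of
 * storage.  New layer1/layer2 entries are formatted on demand as the
 * cursor crosses the corresponding boundaries.
 */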

#if 0
/*
 * Reserve space from the FIFO.  Make sure that bytes does not cross a
 * buffer boundary.
 *
 * Zero out base_bytes and initialize the fifo head and tail.  The
 * data area is not zeroed.
 */
static
hammer_off_t
hammer_alloc_fifo(int32_t base_bytes, int32_t ext_bytes,
                  struct buffer_info **bufp, u_int16_t hdr_type)
{
        struct buffer_info *buf;
        struct volume_info *volume;
        hammer_fifo_head_t head;
        hammer_fifo_tail_t tail;
        hammer_off_t off;
        int32_t aligned_bytes;

        aligned_bytes = (base_bytes + ext_bytes + HAMMER_TAIL_ONDISK_SIZE +
                         HAMMER_HEAD_ALIGN_MASK) & ~HAMMER_HEAD_ALIGN_MASK;

        volume = get_volume(RootVolNo);
        off = volume->ondisk->vol0_fifo_end;

        /*
         * For now don't deal with transitions across buffer boundaries,
         * only newfs_hammer uses this function.
         */
        assert((off & ~HAMMER_BUFMASK64) ==
                ((off + aligned_bytes) & ~HAMMER_BUFMASK64));

        *bufp = buf = get_buffer(off, 0);

        buf->cache.modified = 1;
        volume->cache.modified = 1;

        head = (void *)((char *)buf->ondisk + ((int32_t)off & HAMMER_BUFMASK));
        bzero(head, base_bytes);

        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = aligned_bytes;
        head->hdr_seq = volume->ondisk->vol0_next_seq++;

        tail = (void *)((char *)head + aligned_bytes - HAMMER_TAIL_ONDISK_SIZE);
        tail->tail_signature = HAMMER_TAIL_SIGNATURE;
        tail->tail_type = hdr_type;
        tail->tail_size = aligned_bytes;

        volume->ondisk->vol0_fifo_end += aligned_bytes;
        volume->cache.modified = 1;

        rel_volume(volume);

        return(off);
}

#endif

/*
 * Flush various tracking structures to disk
 */
void
flush_all_volumes(void)
{
        struct volume_info *vol;

        TAILQ_FOREACH(vol, &VolList, entry)
                flush_volume(vol);
}

void
flush_volume(struct volume_info *volume)
{
        struct buffer_info *buffer;

        TAILQ_FOREACH(buffer, &volume->buffer_list, entry)
                flush_buffer(buffer);
        writehammerbuf(volume, volume->ondisk, 0);
        volume->cache.modified = 0;
}

void
flush_buffer(struct buffer_info *buffer)
{
        writehammerbuf(buffer->volume, buffer->ondisk, buffer->buf_disk_offset);
        buffer->cache.modified = 0;
}

#if 0
/*
 * Generic fifo head initialization
 */
static void
init_fifo_head(hammer_fifo_head_t head, u_int16_t hdr_type)
{
        head->hdr_signature = HAMMER_HEAD_SIGNATURE;
        head->hdr_type = hdr_type;
        head->hdr_size = 0;
        head->hdr_crc = 0;
        head->hdr_seq = 0;
}

#endif

#if 0
/*
 * Core I/O operations
 */
static void
readhammerbuf(struct volume_info *vol, void *data, int64_t offset)
{
        ssize_t n;

        n = pread(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Read volume %d (%s)", vol->vol_no, vol->name);
}

#endif

static void
writehammerbuf(struct volume_info *vol, const void *data, int64_t offset)
{
        ssize_t n;

        n = pwrite(vol->fd, data, HAMMER_BUFSIZE, offset);
        if (n != HAMMER_BUFSIZE)
                err(1, "Write volume %d (%s)", vol->vol_no, vol->name);
}

void
panic(const char *ctl, ...)
{
        va_list va;

        va_start(va, ctl);
        vfprintf(stderr, ctl, va);
        va_end(va);
        fprintf(stderr, "\n");
        exit(1);
}