sys/vfs/hammer: Add sanity check to volume-del
sys/vfs/hammer/hammer_volume.c
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "hammer.h"

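/*
 * Running totals used while formatting or freeing a volume's freemap.
 * total_bigblocks/total_free_bigblocks accumulate the deltas that are
 * later applied to the root volume's vol0_stat_bigblocks and
 * vol0_stat_freebigblocks; counter accumulates the number of free
 * big-blocks per layer1 entry (see format_callback()).
 */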
struct bigblock_stat {
	int64_t total_bigblocks;
	int64_t total_free_bigblocks;
	int64_t counter;
};

static int
hammer_format_volume_header(struct hammer_mount *hmp,
	struct hammer_volume_ondisk *ondisk,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_update_volumes_header(hammer_transaction_t trans,
	struct bigblock_stat *stat);

static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat);

static int
hammer_test_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);

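/*
 * Add a volume to the filesystem.
 *
 * A free volume number is picked, a new volume header is formatted
 * (inheriting name, fsid and version from the root volume), the volume
 * is installed, and its freemap is formatted.  Finally the volume count
 * and big-block statistics in the volume headers are updated.
 *
 * Volume operations are serialized by hmp->volume_lock.
 */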
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		HAMMER_VOLUME_NUMBER_IS_SET(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	error = hammer_format_volume_header(
		hmp,
		&ondisk,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

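	/*
	 * Format the new volume's freemap and update the volume
	 * headers while holding the sync lock (shared) and the
	 * blockmap lock.
	 */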
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error = hammer_format_freemap(trans, volume, &stat);
	KKASSERT(error == 0);
	hammer_rel_volume(volume, 0);

	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans, &stat);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	struct bigblock_stat stat;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes <= 1) {
		hmkprintf(hmp, "No HAMMER volume to delete\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find the volume by the device name given in the ioctl.
	 */
	volume = NULL;
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		hmkprintf(hmp, "Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		hmkprintf(hmp, "Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock the filesystem if the volume still contains data, so
	 * that the data is migrated to the remaining volumes.
	 * hmp->volume_to_remove records the volume being removed;
	 * hammer_do_reblock() uses it to set up the reblock ioctl.
	 */
	hmp->volume_to_remove = volume->vol_no;

	if (hammer_test_free_freemap(trans, volume)) {
		error = hammer_do_reblock(trans, ip);
		if (error) {
			hmp->volume_to_remove = -1;
			hammer_rel_volume(volume, 0);
			goto end;
		}
	}

	/*
	 * Sync filesystem
	 */
	hammer_flush_dirty(hmp, 30);

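	/*
	 * Free the volume's freemap and update the volume headers
	 * under the same locks used by volume-add.
	 */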
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * We use stat later to update rootvol's big-block stats
	 */
	error = hammer_free_freemap(trans, volume, &stat);
	if (error) {
		hmkprintf(hmp, "Failed to free volume: ");
		if (error == EBUSY)
			kprintf("Volume %d not empty\n", volume->vol_no);
		else
			kprintf("%d\n", error);
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end1;
	}
	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	bzero(&ondisk, sizeof(ondisk));
	error = hammer_unload_volume(volume, &ondisk);
	if (error == -1) {
		hmkprintf(hmp, "Failed to unload volume\n");
		goto end1;
	}

	--hmp->nvolumes;
	error = hammer_update_volumes_header(trans, &stat);
	KKASSERT(error == 0);
	hmp->volume_to_remove = -1;

end1:
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}


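/*
 * Copy the device name of each mounted volume out to userland.  At most
 * ioc->nvols entries are filled in; the actual number of volumes copied
 * is returned in ioc->nvols.
 */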
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
    struct hammer_ioc_volume_list *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_volume_t volume;
	int error = 0;
	int i, len, cnt = 0;

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		if (cnt >= ioc->nvols)
			break;
		volume = hammer_get_volume(hmp, i, &error);
		KKASSERT(volume != NULL && error == 0);

		len = strlen(volume->vol_name) + 1;
		KKASSERT(len <= MAXPATHLEN);

		error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
				len);
		hammer_rel_volume(volume, 0);
		if (error)
			goto end;
		cnt++;
	}
	ioc->nvols = cnt;

end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}

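/*
 * Reblock the entire filesystem: all object ids and localizations, all
 * PFSs, with free_level 0 and reblock.vol_no set to the volume being
 * removed.  volume-del runs this when the volume still contains data,
 * so that the data is migrated elsewhere before the volume's freemap
 * is freed.
 */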
static
int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_mount *hmp = trans->hmp;
	int error;
	int vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	vol_no = trans->hmp->volume_to_remove;
	KKASSERT(vol_no != -1);

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;	/* reblock all big-blocks */
	reblock.allpfs = 1;	/* reblock all PFS */
	reblock.vol_no = vol_no;

	hmkprintf(hmp, "reblock started\n");
	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			hmkprintf(hmp, "reblock was interrupted\n");
		} else {
			hmkprintf(hmp, "reblock failed: %d\n", error);
		}
		return(error);
	}

	return(0);
}

/*
 * Iterate over all usable L1 entries of the volume and the corresponding
 * L2 entries.
 *
 * For each 4 TB chunk the callback is invoked once per L2 entry (with
 * layer2 != NULL) and then once for the chunk's L1 entry (with
 * layer1 != NULL).
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
		hammer_off_t, hammer_off_t, void*),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	int error = 0;
	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	hammer_off_t aligned_vol_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a big-block (8 MB) boundary.
	 */
	aligned_buf_end_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_end_off = (aligned_buf_end_off + HAMMER_BLOCKMAP_LAYER2_MASK)
		& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB big-block.
	 *
	 * For each chunk we need one L1 entry and one L2 big-block.
	 * We use the first big-block of each chunk as L2 block.
	 */
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_vol_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_BIGBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			layer2 = hammer_bread(hmp, layer2_off, &error, &buffer2);
			if (error)
				goto end;

			error = callback(trans, volume, &buffer2, NULL,
					 layer2, phys_off, block_off, data);
			if (error)
				goto end;
		}

		layer1_off = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer1);
		if (error)
			goto end;

		error = callback(trans, volume, &buffer1, layer1, NULL,
				 phys_off, 0, data);
		if (error)
			goto end;
	}

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}


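/*
 * Callback for hammer_iterate_l1l2_entries() used when formatting the
 * freemap of a newly added volume.  Layer2 entries are initialized as
 * free, as unavailable (beyond the physically usable space), or as
 * reserved for the freemap's own L2 big-block.  Layer1 entries record
 * the number of free big-blocks accumulated in stat->counter.
 */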
static int
format_callback(hammer_transaction_t trans, hammer_volume_t volume,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	/*
	 * Calculate the usable size of the volume, which must be aligned
	 * at a big-block (8 MB) boundary.
	 */
	hammer_off_t aligned_buf_end_off;
	aligned_buf_end_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_BIGBLOCK_MASK64);

	if (layer1) {
		KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = phys_off;
		layer1->blocks_free = stat->counter;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
		stat->counter = 0; /* reset */
	} else if (layer2) {
		hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
		bzero(layer2, sizeof(*layer2));

		if (block_off == 0) {
			/*
			 * The first entry represents the L2 big-block itself.
			 * (On the root volume the first entry represents the
			 * L1 big-block and the second the L2 big-block, but
			 * this function assumes a non-root volume since a new
			 * root volume cannot be added.)
			 */
			KKASSERT(trans->rootvol && trans->rootvol != volume);
			layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		} else if (phys_off + block_off < aligned_buf_end_off) {
			/*
			 * Available big-block
			 */
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
			++stat->total_bigblocks;
			++stat->total_free_bigblocks;
			++stat->counter;
		} else {
			/*
			 * Big-block outside of physically available
			 * space
			 */
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_BIGBLOCK_SIZE;
			layer2->bytes_free = 0;
		}

		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);
	} else {
		KKASSERT(0);
	}

	return 0;
}

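/*
 * Format the freemap (layer1/layer2 entries) covering the new volume
 * and count its big-blocks into *stat.
 */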
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, format_callback, stat);
}

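/*
 * Callback for hammer_iterate_l1l2_entries() used when removing a
 * volume.  Layer1 entries owned by the volume are reset to
 * HAMMER_BLOCKMAP_UNAVAIL.  Layer2 entries must be completely free
 * (or unavailable/freemap-reserved); a big-block that still holds data
 * causes EBUSY to be returned.  Fully free big-blocks are subtracted
 * from the totals in *stat.
 */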
static int
free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	struct bigblock_stat *stat = (struct bigblock_stat*)data;

	if (layer1) {
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			/*
			 * This layer1 entry is already free.
			 */
			return 0;
		}

		KKASSERT(HAMMER_VOL_DECODE(layer1->phys_offset) ==
			trans->hmp->volume_to_remove);

		/*
		 * Free the L1 entry
		 */
		hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(*bufferp);

		return 0;
	} else if (layer2) {
		if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
			return 0;
		}

		if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
			return 0;
		}

		if (layer2->append_off == 0 &&
		    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
			--stat->total_bigblocks;
			--stat->total_free_bigblocks;
			return 0;
		}

		/*
		 * We found a layer2 entry that is not empty!
		 */
		return EBUSY;
	} else {
		KKASSERT(0);
	}

	return EINVAL;
}

/*
 * Non-zero return value means we can't free the volume.
 */
static int
test_free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
	hammer_buffer_t *bufferp,
	struct hammer_blockmap_layer1 *layer1,
	struct hammer_blockmap_layer2 *layer2,
	hammer_off_t phys_off,
	hammer_off_t block_off __unused,
	void *data)
{
	if (layer2 == NULL) {
		return(0);  /* only layer2 needs to be tested */
	}

	if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
		return(0);  /* beyond physically available space */
	}
	if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
		return(0);  /* big-block for layer1/2 */
	}
	if (layer2->append_off == 0 &&
	    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE) {
		return(0);  /* big-block is 0% used */
	}

	return(EBUSY);  /* big-block has data */
}

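/*
 * Free the freemap entries covering the volume.  The volume must be
 * completely empty; hammer_test_free_freemap() is run first and its
 * error is returned if the volume is not ready to be freed.
 */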
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
	struct bigblock_stat *stat)
{
	int error;

	error = hammer_test_free_freemap(trans, volume);
	if (error)
		return error;  /* not ready to free */

	stat->total_bigblocks = 0;
	stat->total_free_bigblocks = 0;
	stat->counter = 0;
	return hammer_iterate_l1l2_entries(trans, volume, free_callback, stat);
}

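/*
 * Returns 0 when every in-range big-block on the volume is completely
 * free, EBUSY if the volume still holds data.
 */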
static int
hammer_test_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	return hammer_iterate_l1l2_entries(trans, volume, test_free_callback, NULL);
}

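/*
 * Initialize the in-memory volume header for a new volume.  The
 * identification fields (fsid, fstype, version, root volume number)
 * are taken from the root volume, the boot and memory log areas are
 * laid out after the header, and the remainder becomes the zone-2
 * buffer area.  Returns EFTYPE if the requested size is too small.
 */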
static int
hammer_format_volume_header(struct hammer_mount *hmp,
	struct hammer_volume_ondisk *ondisk,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
	int64_t vol_alloc;

	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));

	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
	ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
	ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
	ondisk->vol_fsid = hmp->fsid;
	ondisk->vol_rootvol = hmp->rootvol->vol_no;
	ondisk->vol_no = vol_no;
	ondisk->vol_count = vol_count;
	ondisk->vol_version = hmp->version;

	/*
	 * Reserve space for (future) header junk; copy the volume-relative
	 * offsets from the existing root volume.
	 */
	vol_alloc = hmp->rootvol->ondisk->vol_bot_beg;
	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		hmkprintf(hmp, "volume %d %s is too small to hold the volume header\n",
		     ondisk->vol_no, ondisk->vol_name);
		return(EFTYPE);
	}

	ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
			      HAMMER_BUFSIZE;
	ondisk->vol_blocksize = HAMMER_BUFSIZE;
	return(0);
}

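/*
 * Bring the volume headers up to date after a volume has been added or
 * removed: set vol_count on every volume, flush modified non-root
 * volume headers immediately, and apply the big-block deltas in *stat
 * to the root volume's statistics and the mount's block counts.
 */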
static int
hammer_update_volumes_header(hammer_transaction_t trans,
	struct bigblock_stat *stat)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += stat->total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Big-block count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;

	/*
	 * Update the total number of free big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks +=
		stat->total_free_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the copy in hmp.
	 */
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;

	return(error);
}