HAMMER - Fix reblocking operation of volume removal
[dragonfly.git] / sys / vfs / hammer / hammer_volume.c
1 /*
2  * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com> and
6  * Michael Neumann <mneumann@ntecs.de>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  */
36
37 #include "hammer.h"
38 #include <sys/fcntl.h>
39 #include <sys/nlookup.h>
40 #include <sys/buf.h>
41
42 static int
43 hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly);
44
45 static void
46 hammer_close_device(struct vnode **devvpp, int ronly);
47
48 static int
49 hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
50         const char *vol_name, int vol_no, int vol_count,
51         int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);
52
53 static uint64_t
54 hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume);
55
56 static int
57 hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);
58
59
/*
 * Add a volume to a mounted HAMMER filesystem.
 *
 * The device named by ioc->device_name is formatted with a new volume
 * header, installed into the mount, and its freemap is formatted.  The
 * vol_count field of every existing volume is updated and the new
 * bigblocks are credited to the root volume's statistics.
 *
 * Returns 0 on success or an errno on failure.
 */
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes + 1 >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/*
	 * Find an unused volume number.
	 */
	int free_vol_no = 0;
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
	       RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/*
	 * Open the device read-write (ronly == 0), stamp the new volume
	 * header onto it and close it again before installing it.
	 */
	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, ioc->device_name, 0);
	if (error)
		goto end;
	KKASSERT(devvp);
	error = hammer_format_volume_header(
		hmp,
		devvp,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		ioc->vol_size,
		ioc->boot_area_size,
		ioc->mem_area_size);
	hammer_close_device(&devvp, 0);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL);
	if (error)
		goto end;

	/*
	 * Hold the sync lock shared and the blockmap lock exclusively
	 * while we mutate the volume set and freemap.
	 */
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	++hmp->nvolumes;

	/*
	 * Set each volumes new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Format the new volume's freemap and account for the bigblocks
	 * it contributes.
	 */
	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	uint64_t total_free_bigblocks =
		hammer_format_freemap(trans, volume);

	/*
	 * Increase the total number of bigblocks
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += total_free_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Increase the number of free bigblocks
	 * (including the copy in hmp)
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks += total_free_bigblocks;
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;
	hammer_modify_volume_done(trans->rootvol);

	hammer_rel_volume(volume, 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	if (error)
		kprintf("An error occurred: %d\n", error);
	return (error);
}
184
185
/*
 * Remove a volume.
 *
 * The volume whose device name matches ioc->device_name is located,
 * its data is migrated to the remaining volumes by reblocking the
 * entire filesystem, its freemap is released and the volume is
 * unloaded from the mount.  Finally the vol_count field of all
 * remaining volumes is updated.
 *
 * Returns 0 on success or an errno on failure.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}


	volume = NULL;

	/*
	 * find volume by volname
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		kprintf("Couldn't find volume\n");
		return (EINVAL);
	}

	if (volume == trans->rootvol) {
		kprintf("Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		return (EINVAL);
	}

	/*
	 * Mark the volume for removal (presumably consulted by the
	 * allocator/reblocker so no new data lands on it -- verify in
	 * hammer_ioc_reblock()), then reblock the whole filesystem to
	 * migrate all data off this volume.
	 */

	hmp->volume_to_remove = volume->vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	/* free_level 0: move records regardless of how full they are */
	reblock.free_level = 0;

	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}

	if (error) {
		if (error == EINTR) {
			kprintf("reblock was interrupted\n");
		} else {
			kprintf("reblock failed: %d\n", error);
		}
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		return (error);
	}

	/*
	 * Sync filesystem.  Keep kicking the flusher while it has work;
	 * after 30 iterations (~25s of sleeping) give up and proceed.
	 */
	int count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: flushing.");
			else
				kprintf(".");
			tsleep(&count, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up");
			break;
		}
	}
	kprintf("\n");

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * Release the volume's freemap.  Fails (EBUSY) if any bigblock
	 * on the volume is still in use after the reblock.
	 */
	error = hammer_free_freemap(trans, volume);
	if (error) {
		kprintf("Failed to free volume\n");
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		return (error);
	}

	hmp->volume_to_remove = -1;

	hammer_rel_volume(volume, 0);

	/*
	 * Unload buffers
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	error = hammer_unload_volume(volume, NULL);
	if (error == -1) {
		kprintf("Failed to unload volume\n");
		hammer_unlock(&hmp->blkmap_lock);
		hammer_sync_unlock(trans);
		return (error);
	}

	volume = NULL;
	--hmp->nvolumes;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}

		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk. For all
		 * other volumes that we modify we do it here.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	return (0);
}
361
362
/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 *
 * For every 4 TB chunk of the volume the callback is invoked once per
 * L2 entry (with layer1 == NULL) and, after all L2 entries of the
 * chunk, once for the chunk's L1 entry (with layer2 == NULL).  This
 * ordering lets the callback accumulate per-chunk state from the L2
 * entries before the L1 entry is visited.  A non-zero return from the
 * callback aborts the iteration.
 *
 * Returns 0 on success, or an errno from hammer_bread()/the callback.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_buffer_t *,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2 *,
		hammer_off_t, int, void *),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	hammer_buffer_t buffer = NULL;
	int error = 0;

	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a bigblock (8 MB) boundary.
	 */
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB bigblock.
	 *
	 * For each chunk we need one L1 entry and one L2 bigblock.
	 * We use the first bigblock of each chunk as L2 block.
	 */
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_buf_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_LARGEBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			/* buffer is reused/replaced across iterations */
			layer2 = hammer_bread(hmp, layer2_off, &error,
						&buffer);
			if (error)
				goto end;

			int zone;
			if (block_off == 0) {
				/*
				 * The first entry represents the L2 bigblock
				 * itself.
				 */
				zone = HAMMER_ZONE_FREEMAP_INDEX;
			} else if (phys_off + block_off < aligned_buf_end_off) {
				/*
				 * Available bigblock
				 */
				zone = 0;
			} else {
				/*
				 * Bigblock outside of physically available
				 * space
				 */
				zone = HAMMER_ZONE_UNAVAIL_INDEX;
			}

			error = callback(trans, &buffer, NULL, layer2, 0, zone,
					data);
			if (error)
				goto end;
		}

		/*
		 * Visit the chunk's L1 entry after its L2 entries.
		 */
		layer1_off = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer);
		if (error)
			goto end;

		error = callback(trans, &buffer, layer1, NULL, phys_off, 0,
				data);
		if (error)
			goto end;
	}

end:
	if (buffer) {
		hammer_rel_buffer(buffer, 0);
		buffer = NULL;
	}

	return error;
}
461
/*
 * Accumulators threaded through format_callback() while formatting the
 * freemap of a newly added volume.
 */
struct format_bigblock_stat {
	uint64_t total_free_bigblocks;	/* grand total across all L1 chunks */
	uint64_t free_bigblocks;	/* free bigblocks in current chunk,
					 * reset when its L1 entry is written */
};
466
467 static int
468 format_callback(hammer_transaction_t trans, hammer_buffer_t *bufferp,
469         struct hammer_blockmap_layer1 *layer1,
470         struct hammer_blockmap_layer2 *layer2,
471         hammer_off_t phys_off,
472         int layer2_zone,
473         void *data)
474 {
475         struct format_bigblock_stat *stat = (struct format_bigblock_stat*)data;
476
477         if (layer1) {
478                 KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);
479
480                 hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
481                 bzero(layer1, sizeof(layer1));
482                 layer1->phys_offset = phys_off;
483                 layer1->blocks_free = stat->free_bigblocks;
484                 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
485                 hammer_modify_buffer_done(*bufferp);
486
487                 stat->total_free_bigblocks += stat->free_bigblocks;
488                 stat->free_bigblocks = 0; /* reset */
489         } else if (layer2) {
490                 hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
491                 bzero(layer2, sizeof(*layer2));
492
493                 layer2->zone = layer2_zone;
494
495                 switch (layer2->zone) {
496                 case HAMMER_ZONE_FREEMAP_INDEX:
497                         /*
498                          * The first entry represents the L2 bigblock itself.
499                          */
500                         layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
501                         layer2->bytes_free = 0;
502                         break;
503
504                 case 0:
505                         /*
506                          * Available bigblock
507                          */
508                         layer2->append_off = 0;
509                         layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
510                         ++stat->free_bigblocks;
511                         break;
512
513                 case HAMMER_ZONE_UNAVAIL_INDEX:
514                         /*
515                          * Bigblock outside of physically available space
516                          */
517                         layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
518                         layer2->bytes_free = 0;
519                         break;
520                 default:
521                         KKASSERT(0);
522                 }
523
524                 layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
525                 hammer_modify_buffer_done(*bufferp);
526         } else {
527                 KKASSERT(0);
528         }
529
530         return 0;
531 }
532
533 static uint64_t
534 hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume)
535 {
536         int error = 0;
537
538         struct format_bigblock_stat stat;
539         stat.total_free_bigblocks = 0;
540         stat.free_bigblocks = 0;
541
542         error = hammer_iterate_l1l2_entries(trans, volume, format_callback,
543                                         (void*)&stat);
544         KKASSERT(error == 0);
545
546         return stat.total_free_bigblocks;
547 }
548
549 static int
550 free_callback(hammer_transaction_t trans, hammer_buffer_t *bufferp,
551         struct hammer_blockmap_layer1 *layer1,
552         struct hammer_blockmap_layer2 *layer2,
553         hammer_off_t phys_off,
554         int layer2_zone,
555         void *data)
556 {
557         /*
558          * No modifications to ondisk structures
559          */
560         int testonly = (data != NULL);
561
562         if (layer1) {
563                 KKASSERT((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
564                         trans->hmp->volume_to_remove);
565
566                 if (testonly)
567                         return 0;
568
569                 /*
570                  * Free the L1 entry
571                  */
572                 hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
573                 bzero(layer1, sizeof(layer1));
574                 layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
575                 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
576                 hammer_modify_buffer_done(*bufferp);
577
578                 return 0;
579         } else if (layer2) {
580                 switch (layer2->zone) {
581                 case HAMMER_ZONE_FREEMAP_INDEX:
582                 case HAMMER_ZONE_UNAVAIL_INDEX:
583                         return 0;
584                 case 0:
585                         if (layer2->append_off == 0 &&
586                             layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
587                                 return 0;
588                         } else {
589                                 return EBUSY;
590                         }
591                 default:
592                         return EBUSY;
593                 }
594         } else {
595                 KKASSERT(0);
596         }
597
598         return EINVAL;
599 }
600
601 static int
602 hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
603 {
604         int error;
605         error = hammer_iterate_l1l2_entries(trans, volume, free_callback,
606                                             (void*)1);
607         if (error)
608                 return error;
609
610         error = hammer_iterate_l1l2_entries(trans, volume, free_callback, NULL);
611         return error;
612 }
613
614 /************************************************************************
615  *                              MISC                                    *
616  ************************************************************************
617  */
618
/*
 * Look up and open the device identified by dev_path, returning a
 * referenced, opened vnode in *devvpp.
 *
 * If *devvpp is already non-NULL the name lookup is skipped and the
 * supplied vnode is used.  The device is opened read-only when ronly
 * is non-zero, read-write otherwise.
 *
 * Returns 0 on success or an errno; on failure the reference in
 * *devvpp is dropped and the pointer cleared.
 */
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	/*
	 * Refuse a disk that already has a filesystem mounted on it.
	 */
	if (error == 0) {
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	/*
	 * NOTE(review): a positive vcount() presumably means somebody
	 * else already holds the device open -- confirm against
	 * vcount(9) semantics.
	 */
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		/*
		 * Flush any pending buffers, then open the device with
		 * the requested access mode.
		 */
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	if (error && *devvpp) {
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}
662
663 static void
664 hammer_close_device(struct vnode **devvpp, int ronly)
665 {
666         VOP_CLOSE(*devvpp, (ronly ? FREAD : FREAD|FWRITE));
667         if (*devvpp) {
668                 vinvalbuf(*devvpp, ronly ? 0 : V_SAVE, 0, 0);
669                 vrele(*devvpp);
670                 *devvpp = NULL;
671         }
672 }
673
674 static int
675 hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
676         const char *vol_name, int vol_no, int vol_count,
677         int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
678 {
679         struct buf *bp = NULL;
680         struct hammer_volume_ondisk *ondisk;
681         int error;
682
683         /*
684          * Extract the volume number from the volume header and do various
685          * sanity checks.
686          */
687         KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
688         error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
689         if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
690                 goto late_failure;
691
692         ondisk = (struct hammer_volume_ondisk*) bp->b_data;
693
694         /*
695          * Note that we do NOT allow to use a device that contains
696          * a valid HAMMER signature. It has to be cleaned up with dd
697          * before.
698          */
699         if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
700                 kprintf("hammer_volume_add: Formatting of valid HAMMER volume "
701                         "%s denied. Erase with dd!\n", vol_name);
702                 error = EFTYPE;
703                 goto late_failure;
704         }
705
706         bzero(ondisk, sizeof(struct hammer_volume_ondisk));
707         ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
708         ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
709         ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
710         ondisk->vol_fsid = hmp->fsid;
711         ondisk->vol_rootvol = hmp->rootvol->vol_no;
712         ondisk->vol_no = vol_no;
713         ondisk->vol_count = vol_count;
714         ondisk->vol_version = hmp->version;
715
716         /*
717          * Reserve space for (future) header junk, setup our poor-man's
718          * bigblock allocator.
719          */
720         int64_t vol_alloc = HAMMER_BUFSIZE * 16;
721
722         ondisk->vol_bot_beg = vol_alloc;
723         vol_alloc += boot_area_size;
724         ondisk->vol_mem_beg = vol_alloc;
725         vol_alloc += mem_area_size;
726
727         /*
728          * The remaining area is the zone 2 buffer allocation area.  These
729          * buffers
730          */
731         ondisk->vol_buf_beg = vol_alloc;
732         ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;
733
734         if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
735                 kprintf("volume %d %s is too small to hold the volume header",
736                      ondisk->vol_no, ondisk->vol_name);
737                 error = EFTYPE;
738                 goto late_failure;
739         }
740
741         ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
742                               HAMMER_BUFSIZE;
743         ondisk->vol_blocksize = HAMMER_BUFSIZE;
744
745         /*
746          * Write volume header to disk
747          */
748         error = bwrite(bp);
749         bp = NULL;
750
751 late_failure:
752         if (bp)
753                 brelse(bp);
754         return (error);
755 }