Merge branch 'vendor/OPENSSL'
[dragonfly.git] / sys / vfs / hammer / hammer_volume.c
1 /*
2  * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com> and
6  * Michael Neumann <mneumann@ntecs.de>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  */
36
37 #include "hammer.h"
38 #include <sys/fcntl.h>
39 #include <sys/nlookup.h>
40 #include <sys/buf.h>
41
/*
 * Local prototypes.
 */
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly);

static void
hammer_close_device(struct vnode **devvpp, int ronly);

static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
        const char *vol_name, int vol_no, int vol_count,
        int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_clear_volume_header(struct vnode *devvp);

static uint64_t
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);
62
63 int
64 hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
65                 struct hammer_ioc_volume *ioc)
66 {
67         struct hammer_mount *hmp = trans->hmp;
68         struct mount *mp = hmp->mp;
69         hammer_volume_t volume;
70         int error;
71
72         if (mp->mnt_flag & MNT_RDONLY) {
73                 kprintf("Cannot add volume to read-only HAMMER filesystem\n");
74                 return (EINVAL);
75         }
76
77         if (hmp->nvolumes + 1 >= HAMMER_MAX_VOLUMES) {
78                 kprintf("Max number of HAMMER volumes exceeded\n");
79                 return (EINVAL);
80         }
81
82         if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
83                 kprintf("Another volume operation is in progress!\n");
84                 return (EAGAIN);
85         }
86
87         /*
88          * Find an unused volume number.
89          */
90         int free_vol_no = 0;
91         while (free_vol_no < HAMMER_MAX_VOLUMES &&
92                RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
93                 ++free_vol_no;
94         }
95         if (free_vol_no >= HAMMER_MAX_VOLUMES) {
96                 kprintf("Max number of HAMMER volumes exceeded\n");
97                 hammer_unlock(&hmp->volume_lock);
98                 return (EINVAL);
99         }
100
101         struct vnode *devvp = NULL;
102         error = hammer_setup_device(&devvp, ioc->device_name, 0);
103         if (error)
104                 goto end;
105         KKASSERT(devvp);
106         error = hammer_format_volume_header(
107                 hmp,
108                 devvp,
109                 hmp->rootvol->ondisk->vol_name,
110                 free_vol_no,
111                 hmp->nvolumes+1,
112                 ioc->vol_size,
113                 ioc->boot_area_size,
114                 ioc->mem_area_size);
115         hammer_close_device(&devvp, 0);
116         if (error)
117                 goto end;
118
119         error = hammer_install_volume(hmp, ioc->device_name, NULL);
120         if (error)
121                 goto end;
122
123         hammer_sync_lock_sh(trans);
124         hammer_lock_ex(&hmp->blkmap_lock);
125
126         ++hmp->nvolumes;
127
128         /*
129          * Set each volumes new value of the vol_count field.
130          */
131         for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
132                 volume = hammer_get_volume(hmp, vol_no, &error);
133                 if (volume == NULL && error == ENOENT) {
134                         /*
135                          * Skip unused volume numbers
136                          */
137                         error = 0;
138                         continue;
139                 }
140                 KKASSERT(volume != NULL && error == 0);
141                 hammer_modify_volume_field(trans, volume, vol_count);
142                 volume->ondisk->vol_count = hmp->nvolumes;
143                 hammer_modify_volume_done(volume);
144
145                 /*
146                  * Only changes to the header of the root volume
147                  * are automatically flushed to disk. For all
148                  * other volumes that we modify we do it here.
149                  */
150                 if (volume != trans->rootvol && volume->io.modified) {
151                         hammer_crc_set_volume(volume->ondisk);
152                         hammer_io_flush(&volume->io, 0);
153                 }
154
155                 hammer_rel_volume(volume, 0);
156         }
157
158         volume = hammer_get_volume(hmp, free_vol_no, &error);
159         KKASSERT(volume != NULL && error == 0);
160
161         uint64_t total_free_bigblocks =
162                 hammer_format_freemap(trans, volume);
163
164         /*
165          * Increase the total number of bigblocks
166          */
167         hammer_modify_volume_field(trans, trans->rootvol,
168                 vol0_stat_bigblocks);
169         trans->rootvol->ondisk->vol0_stat_bigblocks += total_free_bigblocks;
170         hammer_modify_volume_done(trans->rootvol);
171
172         /*
173          * Increase the number of free bigblocks
174          * (including the copy in hmp)
175          */
176         hammer_modify_volume_field(trans, trans->rootvol,
177                 vol0_stat_freebigblocks);
178         trans->rootvol->ondisk->vol0_stat_freebigblocks += total_free_bigblocks;
179         hmp->copy_stat_freebigblocks =
180                 trans->rootvol->ondisk->vol0_stat_freebigblocks;
181         hammer_modify_volume_done(trans->rootvol);
182
183         hammer_rel_volume(volume, 0);
184
185         hammer_unlock(&hmp->blkmap_lock);
186         hammer_sync_unlock(trans);
187
188         KKASSERT(error == 0);
189 end:
190         hammer_unlock(&hmp->volume_lock);
191         if (error)
192                 kprintf("An error occurred: %d\n", error);
193         return (error);
194 }
195
196
197 /*
198  * Remove a volume.
199  */
200 int
201 hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
202                 struct hammer_ioc_volume *ioc)
203 {
204         struct hammer_mount *hmp = trans->hmp;
205         struct mount *mp = hmp->mp;
206         hammer_volume_t volume;
207         int error = 0;
208
209         if (mp->mnt_flag & MNT_RDONLY) {
210                 kprintf("Cannot del volume from read-only HAMMER filesystem\n");
211                 return (EINVAL);
212         }
213
214         if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
215                 kprintf("Another volume operation is in progress!\n");
216                 return (EAGAIN);
217         }
218
219         volume = NULL;
220
221         /*
222          * find volume by volname
223          */
224         for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
225                 volume = hammer_get_volume(hmp, vol_no, &error);
226                 if (volume == NULL && error == ENOENT) {
227                         /*
228                          * Skip unused volume numbers
229                          */
230                         error = 0;
231                         continue;
232                 }
233                 KKASSERT(volume != NULL && error == 0);
234                 if (strcmp(volume->vol_name, ioc->device_name) == 0) {
235                         break;
236                 }
237                 hammer_rel_volume(volume, 0);
238                 volume = NULL;
239         }
240
241         if (volume == NULL) {
242                 kprintf("Couldn't find volume\n");
243                 error = EINVAL;
244                 goto end;
245         }
246
247         if (volume == trans->rootvol) {
248                 kprintf("Cannot remove root-volume\n");
249                 hammer_rel_volume(volume, 0);
250                 error = EINVAL;
251                 goto end;
252         }
253
254         /*
255          *
256          */
257
258         hmp->volume_to_remove = volume->vol_no;
259
260         struct hammer_ioc_reblock reblock;
261         bzero(&reblock, sizeof(reblock));
262
263         reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
264         reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
265         reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
266         reblock.key_end.obj_id = HAMMER_MAX_OBJID;
267         reblock.head.flags = HAMMER_IOC_DO_FLAGS;
268         reblock.free_level = 0;
269
270         error = hammer_ioc_reblock(trans, ip, &reblock);
271
272         if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
273                 error = EINTR;
274         }
275
276         if (error) {
277                 if (error == EINTR) {
278                         kprintf("reblock was interrupted\n");
279                 } else {
280                         kprintf("reblock failed: %d\n", error);
281                 }
282                 hmp->volume_to_remove = -1;
283                 hammer_rel_volume(volume, 0);
284                 goto end;
285         }
286
287         /*
288          * Sync filesystem
289          */
290         int count = 0;
291         while (hammer_flusher_haswork(hmp)) {
292                 hammer_flusher_sync(hmp);
293                 ++count;
294                 if (count >= 5) {
295                         if (count == 5)
296                                 kprintf("HAMMER: flushing.");
297                         else
298                                 kprintf(".");
299                         tsleep(&count, 0, "hmrufl", hz);
300                 }
301                 if (count == 30) {
302                         kprintf("giving up");
303                         break;
304                 }
305         }
306         kprintf("\n");
307
308         hammer_sync_lock_sh(trans);
309         hammer_lock_ex(&hmp->blkmap_lock);
310
311         error = hammer_free_freemap(trans, volume);
312         if (error) {
313                 kprintf("Failed to free volume. Volume not empty!\n");
314                 hmp->volume_to_remove = -1;
315                 hammer_rel_volume(volume, 0);
316                 hammer_unlock(&hmp->blkmap_lock);
317                 hammer_sync_unlock(trans);
318                 goto end;
319         }
320
321         hmp->volume_to_remove = -1;
322
323         hammer_rel_volume(volume, 0);
324
325         /*
326          * Unload buffers
327          */
328         RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
329                 hammer_unload_buffer, volume);
330
331         error = hammer_unload_volume(volume, NULL);
332         if (error == -1) {
333                 kprintf("Failed to unload volume\n");
334                 hammer_unlock(&hmp->blkmap_lock);
335                 hammer_sync_unlock(trans);
336                 goto end;
337         }
338
339         volume = NULL;
340         --hmp->nvolumes;
341
342         /*
343          * Set each volume's new value of the vol_count field.
344          */
345         for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
346                 volume = hammer_get_volume(hmp, vol_no, &error);
347                 if (volume == NULL && error == ENOENT) {
348                         /*
349                          * Skip unused volume numbers
350                          */
351                         error = 0;
352                         continue;
353                 }
354
355                 KKASSERT(volume != NULL && error == 0);
356                 hammer_modify_volume_field(trans, volume, vol_count);
357                 volume->ondisk->vol_count = hmp->nvolumes;
358                 hammer_modify_volume_done(volume);
359
360                 /*
361                  * Only changes to the header of the root volume
362                  * are automatically flushed to disk. For all
363                  * other volumes that we modify we do it here.
364                  */
365                 if (volume != trans->rootvol && volume->io.modified) {
366                         hammer_crc_set_volume(volume->ondisk);
367                         hammer_io_flush(&volume->io, 0);
368                 }
369
370                 hammer_rel_volume(volume, 0);
371         }
372
373         hammer_unlock(&hmp->blkmap_lock);
374         hammer_sync_unlock(trans);
375
376         /*
377          * Erase the volume header of the removed device.
378          *
379          * This is to not accidentally mount the volume again.
380          */
381         struct vnode *devvp = NULL;
382         error = hammer_setup_device(&devvp, ioc->device_name, 0);
383         if (error) {
384                 kprintf("Failed to open device: %s\n", ioc->device_name);
385                 goto end;
386         }
387         KKASSERT(devvp);
388         error = hammer_clear_volume_header(devvp);
389         if (error) {
390                 kprintf("Failed to clear volume header of device: %s\n",
391                         ioc->device_name);
392                 goto end;
393         }
394         hammer_close_device(&devvp, 0);
395
396         KKASSERT(error == 0);
397 end:
398         hammer_unlock(&hmp->volume_lock);
399         return (error);
400 }
401
402
403 /*
404  * Iterate over all usable L1 entries of the volume and
405  * the corresponding L2 entries.
406  */
/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 *
 * For each 4 TB chunk of the volume's zone-2 address space the callback
 * is invoked once per L2 entry (layer1 == NULL) and then once for the
 * chunk's L1 entry (layer2 == NULL).  Note the ordering: all L2 entries
 * of a chunk are visited BEFORE its L1 entry, which format_callback()
 * relies on to accumulate per-chunk free counts.
 *
 * Iteration aborts on the first non-zero error from hammer_bread() or
 * the callback.  The shared buffer reference is released on all paths.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
        int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
                struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
                hammer_off_t, hammer_off_t, void*),
        void *data)
{
        struct hammer_mount *hmp = trans->hmp;
        hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        hammer_buffer_t buffer = NULL;  /* shared bread() buffer cache ref */
        int error = 0;

        hammer_off_t phys_off;
        hammer_off_t block_off;
        hammer_off_t layer1_off;
        hammer_off_t layer2_off;
        hammer_off_t aligned_buf_end_off;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;

        /*
         * Calculate the usable size of the volume, which
         * must be aligned at a bigblock (8 MB) boundary.
         */
        aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
                (volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
                & ~HAMMER_LARGEBLOCK_MASK64));

        /*
         * Iterate the volume's address space in chunks of 4 TB, where each
         * chunk consists of at least one physically available 8 MB bigblock.
         *
         * For each chunk we need one L1 entry and one L2 bigblock.
         * We use the first bigblock of each chunk as L2 block.
         */
        for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
             phys_off < aligned_buf_end_off;
             phys_off += HAMMER_BLOCKMAP_LAYER2) {
                for (block_off = 0;
                     block_off < HAMMER_BLOCKMAP_LAYER2;
                     block_off += HAMMER_LARGEBLOCK_SIZE) {
                        layer2_off = phys_off +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
                        layer2 = hammer_bread(hmp, layer2_off, &error, &buffer);
                        if (error)
                                goto end;

                        error = callback(trans, volume, &buffer, NULL,
                                         layer2, phys_off, block_off, data);
                        if (error)
                                goto end;
                }

                /* L1 entries live in the root volume's freemap area. */
                layer1_off = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
                layer1 = hammer_bread(hmp, layer1_off, &error, &buffer);
                if (error)
                        goto end;

                error = callback(trans, volume, &buffer, layer1, NULL,
                                 phys_off, 0, data);
                if (error)
                        goto end;
        }

end:
        if (buffer) {
                hammer_rel_buffer(buffer, 0);
                buffer = NULL;
        }

        return error;
}
480
/*
 * Accumulator passed to format_callback() while formatting a new
 * volume's freemap.
 */
struct format_bigblock_stat {
        uint64_t total_free_bigblocks;  /* free bigblocks over the whole volume */
        uint64_t free_bigblocks;        /* free bigblocks in the current L1 chunk */
};
485
486 static int
487 format_callback(hammer_transaction_t trans, hammer_volume_t volume,
488         hammer_buffer_t *bufferp,
489         struct hammer_blockmap_layer1 *layer1,
490         struct hammer_blockmap_layer2 *layer2,
491         hammer_off_t phys_off,
492         hammer_off_t block_off,
493         void *data)
494 {
495         struct format_bigblock_stat *stat = (struct format_bigblock_stat*)data;
496
497         /*
498          * Calculate the usable size of the volume, which must be aligned
499          * at a bigblock (8 MB) boundary.
500          */
501         hammer_off_t aligned_buf_end_off;
502         aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
503                 (volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
504                 & ~HAMMER_LARGEBLOCK_MASK64));
505
506         if (layer1) {
507                 KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);
508
509                 hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
510                 bzero(layer1, sizeof(layer1));
511                 layer1->phys_offset = phys_off;
512                 layer1->blocks_free = stat->free_bigblocks;
513                 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
514                 hammer_modify_buffer_done(*bufferp);
515
516                 stat->total_free_bigblocks += stat->free_bigblocks;
517                 stat->free_bigblocks = 0; /* reset */
518         } else if (layer2) {
519                 hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
520                 bzero(layer2, sizeof(*layer2));
521
522                 if (block_off == 0) {
523                         /*
524                          * The first entry represents the L2 bigblock itself.
525                          */
526                         layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
527                         layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
528                         layer2->bytes_free = 0;
529                 } else if (phys_off + block_off < aligned_buf_end_off) {
530                         /*
531                          * Available bigblock
532                          */
533                         layer2->zone = 0;
534                         layer2->append_off = 0;
535                         layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
536                         ++stat->free_bigblocks;
537                 } else {
538                         /*
539                          * Bigblock outside of physically available
540                          * space
541                          */
542                         layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
543                         layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
544                         layer2->bytes_free = 0;
545                 }
546
547                 layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
548                 hammer_modify_buffer_done(*bufferp);
549         } else {
550                 KKASSERT(0);
551         }
552
553         return 0;
554 }
555
556 static uint64_t
557 hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume)
558 {
559         int error = 0;
560
561         struct format_bigblock_stat stat;
562         stat.total_free_bigblocks = 0;
563         stat.free_bigblocks = 0;
564
565         error = hammer_iterate_l1l2_entries(trans, volume, format_callback,
566                                         (void*)&stat);
567         KKASSERT(error == 0);
568
569         return stat.total_free_bigblocks;
570 }
571
572 static int
573 free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
574         hammer_buffer_t *bufferp,
575         struct hammer_blockmap_layer1 *layer1,
576         struct hammer_blockmap_layer2 *layer2,
577         hammer_off_t phys_off,
578         hammer_off_t block_off __unused,
579         void *data)
580 {
581         /*
582          * No modifications to ondisk structures
583          */
584         int testonly = (data != NULL);
585
586         if (layer1) {
587                 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
588                         /*
589                          * This layer1 entry is already free.
590                          */
591                         return 0;
592                 }
593
594                 KKASSERT((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
595                         trans->hmp->volume_to_remove);
596
597                 if (testonly)
598                         return 0;
599
600                 /*
601                  * Free the L1 entry
602                  */
603                 hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
604                 bzero(layer1, sizeof(layer1));
605                 layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
606                 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
607                 hammer_modify_buffer_done(*bufferp);
608
609                 return 0;
610         } else if (layer2) {
611                 if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX ||
612                     layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX)
613                         return 0;
614
615                 if (layer2->append_off == 0 &&
616                     layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE)
617                         return 0;
618                 /*
619                  * We found a layer2 entry that is not empty!
620                  */
621                 return EBUSY;
622         } else {
623                 KKASSERT(0);
624         }
625
626         return EINVAL;
627 }
628
629 static int
630 hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
631 {
632         int error;
633         error = hammer_iterate_l1l2_entries(trans, volume, free_callback,
634                                             (void*)1);
635         if (error)
636                 return error;
637
638         error = hammer_iterate_l1l2_entries(trans, volume, free_callback, NULL);
639         return error;
640 }
641
642 /************************************************************************
643  *                              MISC                                    *
644  ************************************************************************
645  */
646
/*
 * Look up and open the device at 'dev_path', returning its vnode in
 * *devvpp (read-only when 'ronly' is set, read-write otherwise).
 *
 * If *devvpp is already non-NULL the lookup is skipped and the caller's
 * vnode is used.  The device must be a disk that is not currently
 * mounted and has no other users.  On failure the vnode reference is
 * dropped and *devvpp is reset to NULL.
 */
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
        int error;
        struct nlookupdata nd;

        /*
         * Get the device vnode
         */
        if (*devvpp == NULL) {
                error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
                nlookup_done(&nd);
        } else {
                error = 0;
        }

        if (error == 0) {
                /* Refuse devices that are already mounted. */
                if (vn_isdisk(*devvpp, &error)) {
                        error = vfs_mountedon(*devvpp);
                }
        }
        /* Refuse devices with any other active opens. */
        if (error == 0 && vcount(*devvpp) > 0)
                error = EBUSY;
        if (error == 0) {
                /*
                 * Flush any stale buffers before opening, under the
                 * vnode lock as VOP_OPEN requires.
                 */
                vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(*devvpp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(*devvpp);
        }
        if (error && *devvpp) {
                vrele(*devvpp);
                *devvpp = NULL;
        }
        return (error);
}
690
691 static void
692 hammer_close_device(struct vnode **devvpp, int ronly)
693 {
694         VOP_CLOSE(*devvpp, (ronly ? FREAD : FREAD|FWRITE));
695         if (*devvpp) {
696                 vinvalbuf(*devvpp, ronly ? 0 : V_SAVE, 0, 0);
697                 vrele(*devvpp);
698                 *devvpp = NULL;
699         }
700 }
701
702 static int
703 hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
704         const char *vol_name, int vol_no, int vol_count,
705         int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
706 {
707         struct buf *bp = NULL;
708         struct hammer_volume_ondisk *ondisk;
709         int error;
710
711         /*
712          * Extract the volume number from the volume header and do various
713          * sanity checks.
714          */
715         KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
716         error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
717         if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
718                 goto late_failure;
719
720         ondisk = (struct hammer_volume_ondisk*) bp->b_data;
721
722         /*
723          * Note that we do NOT allow to use a device that contains
724          * a valid HAMMER signature. It has to be cleaned up with dd
725          * before.
726          */
727         if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
728                 kprintf("hammer_volume_add: Formatting of valid HAMMER volume "
729                         "%s denied. Erase with dd!\n", vol_name);
730                 error = EFTYPE;
731                 goto late_failure;
732         }
733
734         bzero(ondisk, sizeof(struct hammer_volume_ondisk));
735         ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
736         ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
737         ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
738         ondisk->vol_fsid = hmp->fsid;
739         ondisk->vol_rootvol = hmp->rootvol->vol_no;
740         ondisk->vol_no = vol_no;
741         ondisk->vol_count = vol_count;
742         ondisk->vol_version = hmp->version;
743
744         /*
745          * Reserve space for (future) header junk, setup our poor-man's
746          * bigblock allocator.
747          */
748         int64_t vol_alloc = HAMMER_BUFSIZE * 16;
749
750         ondisk->vol_bot_beg = vol_alloc;
751         vol_alloc += boot_area_size;
752         ondisk->vol_mem_beg = vol_alloc;
753         vol_alloc += mem_area_size;
754
755         /*
756          * The remaining area is the zone 2 buffer allocation area.  These
757          * buffers
758          */
759         ondisk->vol_buf_beg = vol_alloc;
760         ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;
761
762         if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
763                 kprintf("volume %d %s is too small to hold the volume header",
764                      ondisk->vol_no, ondisk->vol_name);
765                 error = EFTYPE;
766                 goto late_failure;
767         }
768
769         ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
770                               HAMMER_BUFSIZE;
771         ondisk->vol_blocksize = HAMMER_BUFSIZE;
772
773         /*
774          * Write volume header to disk
775          */
776         error = bwrite(bp);
777         bp = NULL;
778
779 late_failure:
780         if (bp)
781                 brelse(bp);
782         return (error);
783 }
784
785 /*
786  * Invalidates the volume header. Used by volume-del.
787  */
788 static int
789 hammer_clear_volume_header(struct vnode *devvp)
790 {
791         struct buf *bp = NULL;
792         struct hammer_volume_ondisk *ondisk;
793         int error;
794
795         KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
796         error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
797         if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
798                 goto late_failure;
799
800         ondisk = (struct hammer_volume_ondisk*) bp->b_data;
801         bzero(ondisk, sizeof(struct hammer_volume_ondisk));
802
803         error = bwrite(bp);
804         bp = NULL;
805
806 late_failure:
807         if (bp)
808                 brelse(bp);
809         return (error);
810 }