Merge branch 'vendor/DHCPCD'
[dragonfly.git] / sys / vfs / hammer / hammer_volume.c
1 /*
2  * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com> and
6  * Michael Neumann <mneumann@ntecs.de>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  */
36
37 #include "hammer.h"
38
/*
 * Forward declarations for the local helpers implemented below.
 */
static int
hammer_format_volume_header(hammer_mount_t hmp,
	struct hammer_ioc_volume *ioc,
	hammer_volume_ondisk_t ondisk,
	int vol_no);

static int
hammer_update_volumes_header(hammer_transaction_t trans,
	int64_t total_bigblocks, int64_t empty_bigblocks);

static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
	int64_t *total_bigblocks, int64_t *empty_bigblocks);
61
/*
 * Add a volume to a mounted HAMMER filesystem (volume-add ioctl).
 *
 * The header for the new volume is cloned from the root volume's
 * header, the volume is installed into the mount, and its freemap is
 * formatted so every usable big-block starts out free.  Finally the
 * vol_count and big-block statistics in the volume headers are updated.
 *
 * Lock order: volume_lock (exclusive, try) -> sync lock (shared) ->
 * blkmap_lock (exclusive).
 *
 * Returns 0 on success, EINVAL/EAGAIN for the rejection cases, or an
 * error from hammer_format_volume_header()/hammer_install_volume().
 * The ip argument is part of the ioctl call convention and is not used
 * here.
 */
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	hammer_mount_t hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int free_vol_no = 0;
	int error;

	/* Volumes cannot be added to a read-only mount. */
	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	/* Only one volume add/del/list operation may run at a time. */
	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hammer_unlock(&hmp->volume_lock);
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		hammer_volume_number_test(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	/*
	 * Build the on-disk header for the new volume and install the
	 * volume into the mount.
	 */
	error = hammer_format_volume_header(hmp, ioc, &ondisk, free_vol_no);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error = hammer_format_freemap(trans, volume);
	KKASSERT(error == 0);

	/*
	 * A freshly formatted volume must consist entirely of empty
	 * big-blocks.
	 */
	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);
	KKASSERT(total_bigblocks == empty_bigblocks);

	hammer_rel_volume(volume, 0);

	/* Publish the new volume count, then fix up all volume headers. */
	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			total_bigblocks, empty_bigblocks);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}
142
143
144 /*
145  * Remove a volume.
146  */
147 int
148 hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
149                 struct hammer_ioc_volume *ioc)
150 {
151         hammer_mount_t hmp = trans->hmp;
152         struct mount *mp = hmp->mp;
153         struct hammer_volume_ondisk ondisk;
154         hammer_volume_t volume;
155         int64_t total_bigblocks, empty_bigblocks;
156         int vol_no;
157         int error = 0;
158
159         if (mp->mnt_flag & MNT_RDONLY) {
160                 hmkprintf(hmp, "Cannot del volume from read-only HAMMER filesystem\n");
161                 return (EINVAL);
162         }
163
164         if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
165                 hmkprintf(hmp, "Another volume operation is in progress!\n");
166                 return (EAGAIN);
167         }
168
169         if (hmp->nvolumes <= 1) {
170                 hammer_unlock(&hmp->volume_lock);
171                 hmkprintf(hmp, "No HAMMER volume to delete\n");
172                 return (EINVAL);
173         }
174
175         /*
176          * find volume by volname
177          */
178         volume = NULL;
179         HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
180                 volume = hammer_get_volume(hmp, vol_no, &error);
181                 KKASSERT(volume != NULL && error == 0);
182                 if (strcmp(volume->vol_name, ioc->device_name) == 0) {
183                         break;
184                 }
185                 hammer_rel_volume(volume, 0);
186                 volume = NULL;
187         }
188
189         if (volume == NULL) {
190                 hmkprintf(hmp, "Couldn't find volume\n");
191                 error = EINVAL;
192                 goto end;
193         }
194
195         if (volume == trans->rootvol) {
196                 hmkprintf(hmp, "Cannot remove root-volume\n");
197                 hammer_rel_volume(volume, 0);
198                 error = EINVAL;
199                 goto end;
200         }
201
202         /*
203          * Reblock filesystem if the volume is not empty
204          */
205         hmp->volume_to_remove = volume->vol_no;
206
207         error = hammer_count_bigblocks(hmp, volume,
208                         &total_bigblocks, &empty_bigblocks);
209         KKASSERT(error == 0);
210
211         if (total_bigblocks == empty_bigblocks) {
212                 hmkprintf(hmp, "%s is already empty\n", volume->vol_name);
213         } else if (ioc->flag & HAMMER_IOC_VOLUME_REBLOCK) {
214                 error = hammer_do_reblock(trans, ip);
215                 if (error) {
216                         hmp->volume_to_remove = -1;
217                         hammer_rel_volume(volume, 0);
218                         goto end;
219                 }
220         } else {
221                 hmkprintf(hmp, "%s is not empty\n", volume->vol_name);
222                 hammer_rel_volume(volume, 0);
223                 error = ENOTEMPTY;
224                 goto end;
225         }
226
227         hammer_sync_lock_sh(trans);
228         hammer_lock_ex(&hmp->blkmap_lock);
229
230         error = hammer_count_bigblocks(hmp, volume,
231                         &total_bigblocks, &empty_bigblocks);
232         KKASSERT(error == 0);
233
234         error = hammer_free_freemap(trans, volume);
235         if (error) {
236                 hmkprintf(hmp, "Failed to free volume: ");
237                 if (error == EBUSY)
238                         kprintf("Volume %d not empty\n", volume->vol_no);
239                 else
240                         kprintf("%d\n", error);
241                 hmp->volume_to_remove = -1;
242                 hammer_rel_volume(volume, 0);
243                 goto end1;
244         }
245         hammer_rel_volume(volume, 0);
246
247         /*
248          * XXX: Temporary solution for
249          * http://lists.dragonflybsd.org/pipermail/kernel/2015-August/175027.html
250          */
251         hammer_unlock(&hmp->blkmap_lock);
252         hammer_sync_unlock(trans);
253         hammer_flusher_sync(hmp); /* 1 */
254         hammer_flusher_sync(hmp); /* 2 */
255         hammer_flusher_sync(hmp); /* 3 */
256         hammer_sync_lock_sh(trans);
257         hammer_lock_ex(&hmp->blkmap_lock);
258
259         /*
260          * Unload buffers
261          */
262         RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
263                 hammer_unload_buffer, volume);
264
265         bzero(&ondisk, sizeof(ondisk));
266         error = hammer_unload_volume(volume, &ondisk);
267         if (error == -1) {
268                 hmkprintf(hmp, "Failed to unload volume\n");
269                 goto end1;
270         }
271
272         --hmp->nvolumes;
273         error = hammer_update_volumes_header(trans,
274                         -total_bigblocks, -empty_bigblocks);
275         KKASSERT(error == 0);
276         hmp->volume_to_remove = -1;
277
278 end1:
279         hammer_unlock(&hmp->blkmap_lock);
280         hammer_sync_unlock(trans);
281
282 end:
283         hammer_unlock(&hmp->volume_lock);
284         if (error)
285                 hmkprintf(hmp, "An error occurred: %d\n", error);
286         return (error);
287 }
288
289
290 int
291 hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
292     struct hammer_ioc_volume_list *ioc)
293 {
294         hammer_mount_t hmp = trans->hmp;
295         hammer_volume_t volume;
296         int32_t vol_no;
297         int error = 0;
298         int i, len, cnt = 0;
299
300         if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
301                 hmkprintf(hmp, "Another volume operation is in progress!\n");
302                 return (EAGAIN);
303         }
304
305         HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
306                 if (cnt >= ioc->nvols)
307                         break;
308                 volume = hammer_get_volume(hmp, i, &error);
309                 KKASSERT(volume != NULL && error == 0);
310
311                 len = strlen(volume->vol_name) + 1;
312                 KKASSERT(len <= MAXPATHLEN);
313
314                 vol_no = volume->vol_no;
315                 error = copyout(&vol_no, &ioc->vols[cnt].vol_no,
316                                 sizeof(ioc->vols[cnt].vol_no));
317                 if (error == 0)
318                         error = copyout(volume->vol_name,
319                                         &ioc->vols[cnt].device_name[0], len);
320                 hammer_rel_volume(volume, 0);
321                 if (error)
322                         goto end;
323                 cnt++;
324         }
325         ioc->nvols = cnt;
326
327 end:
328         hammer_unlock(&hmp->volume_lock);
329         return (error);
330 }
331
332 static
333 int
334 hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
335 {
336         hammer_mount_t hmp = trans->hmp;
337         int error;
338         int vol_no;
339
340         struct hammer_ioc_reblock reblock;
341         bzero(&reblock, sizeof(reblock));
342
343         vol_no = trans->hmp->volume_to_remove;
344         KKASSERT(vol_no != -1);
345
346         reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
347         reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
348         reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
349         reblock.key_end.obj_id = HAMMER_MAX_OBJID;
350         reblock.head.flags = HAMMER_IOC_DO_FLAGS;
351         reblock.free_level = 0; /* reblock all big-blocks */
352         reblock.allpfs = 1;     /* reblock all PFS */
353         reblock.vol_no = vol_no;
354
355         hmkprintf(hmp, "reblock started\n");
356         error = hammer_ioc_reblock(trans, ip, &reblock);
357
358         if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
359                 error = EINTR;
360         }
361
362         if (error) {
363                 if (error == EINTR) {
364                         hmkprintf(hmp, "reblock was interrupted\n");
365                 } else {
366                         hmkprintf(hmp, "reblock failed: %d\n", error);
367                 }
368                 return(error);
369         }
370
371         return(0);
372 }
373
374 /*
375  * XXX This somehow needs to stop doing hammer_modify_buffer() for
376  * layer2 entries.  In theory adding a large block device could
377  * blow away UNDO fifo.  The best way is to format layer2 entries
378  * in userspace without UNDO getting involved before the device is
379  * safely added to the filesystem.  HAMMER has no interest in what
380  * has happened to the device before it safely joins the filesystem.
381  */
382 static int
383 hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume)
384 {
385         hammer_mount_t hmp = trans->hmp;
386         hammer_volume_ondisk_t ondisk;
387         hammer_blockmap_t freemap;
388         hammer_off_t alloc_offset;
389         hammer_off_t phys_offset;
390         hammer_off_t block_offset;
391         hammer_off_t layer1_offset;
392         hammer_off_t layer2_offset;
393         hammer_off_t vol_free_end;
394         hammer_off_t aligned_vol_free_end;
395         hammer_blockmap_layer1_t layer1;
396         hammer_blockmap_layer2_t layer2;
397         hammer_buffer_t buffer1 = NULL;
398         hammer_buffer_t buffer2 = NULL;
399         int64_t vol_buf_size;
400         int64_t layer1_count = 0;
401         int error = 0;
402
403         KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);
404
405         ondisk = volume->ondisk;
406         vol_buf_size = HAMMER_VOL_BUF_SIZE(ondisk);
407         KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0);
408         vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
409                         vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
410         aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol_free_end);
411
412         freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
413         alloc_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
414
415         hmkprintf(hmp, "Initialize freemap volume %d\n", volume->vol_no);
416
417         for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
418              phys_offset < aligned_vol_free_end;
419              phys_offset += HAMMER_BLOCKMAP_LAYER2) {
420                 layer1_offset = freemap->phys_offset +
421                                 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
422                 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
423                 if (error)
424                         goto end;
425                 if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
426                         hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
427                         bzero(layer1, sizeof(*layer1));
428                         layer1->phys_offset = alloc_offset;
429                         layer1->blocks_free = 0;
430                         hammer_crc_set_layer1(hmp->version, layer1);
431                         hammer_modify_buffer_done(buffer1);
432                         alloc_offset += HAMMER_BIGBLOCK_SIZE;
433                 }
434         }
435
436         for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
437              phys_offset < aligned_vol_free_end;
438              phys_offset += HAMMER_BLOCKMAP_LAYER2) {
439                 layer1_count = 0;
440                 layer1_offset = freemap->phys_offset +
441                                 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
442                 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
443                 if (error)
444                         goto end;
445                 KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
446
447                 for (block_offset = 0;
448                      block_offset < HAMMER_BLOCKMAP_LAYER2;
449                      block_offset += HAMMER_BIGBLOCK_SIZE) {
450                         layer2_offset = layer1->phys_offset +
451                                         HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
452                         layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
453                         if (error)
454                                 goto end;
455
456                         hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
457                         bzero(layer2, sizeof(*layer2));
458
459                         if (phys_offset + block_offset < alloc_offset) {
460                                 layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
461                                 layer2->append_off = HAMMER_BIGBLOCK_SIZE;
462                                 layer2->bytes_free = 0;
463                         } else if (phys_offset + block_offset < vol_free_end) {
464                                 layer2->zone = 0;
465                                 layer2->append_off = 0;
466                                 layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
467                                 ++layer1_count;
468                         } else {
469                                 layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
470                                 layer2->append_off = HAMMER_BIGBLOCK_SIZE;
471                                 layer2->bytes_free = 0;
472                         }
473
474                         hammer_crc_set_layer2(hmp->version, layer2);
475                         hammer_modify_buffer_done(buffer2);
476                 }
477
478                 hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
479                 layer1->blocks_free += layer1_count;
480                 hammer_crc_set_layer1(hmp->version, layer1);
481                 hammer_modify_buffer_done(buffer1);
482         }
483
484 end:
485         if (buffer1)
486                 hammer_rel_buffer(buffer1, 0);
487         if (buffer2)
488                 hammer_rel_buffer(buffer2, 0);
489
490         return error;
491 }
492
493 /*
494  * XXX This somehow needs to stop doing hammer_modify_buffer() for
495  * layer2 entries.  In theory removing a large block device could
496  * blow away UNDO fifo.  The best way is to erase layer2 entries
497  * in userspace without UNDO getting involved after the device has
498  * been safely removed from the filesystem.  HAMMER has no interest
499  * in what happens to the device once it's safely removed.
500  */
501 static int
502 hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
503 {
504         hammer_mount_t hmp = trans->hmp;
505         hammer_volume_ondisk_t ondisk;
506         hammer_blockmap_t freemap;
507         hammer_off_t phys_offset;
508         hammer_off_t block_offset;
509         hammer_off_t layer1_offset;
510         hammer_off_t layer2_offset;
511         hammer_off_t vol_free_end;
512         hammer_off_t aligned_vol_free_end;
513         hammer_blockmap_layer1_t layer1;
514         hammer_blockmap_layer2_t layer2;
515         hammer_buffer_t buffer1 = NULL;
516         hammer_buffer_t buffer2 = NULL;
517         int64_t vol_buf_size;
518         int error = 0;
519
520         KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);
521
522         ondisk = volume->ondisk;
523         vol_buf_size = HAMMER_VOL_BUF_SIZE(ondisk);
524         KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0);
525         vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
526                         vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
527         aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol_free_end);
528
529         freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
530
531         hmkprintf(hmp, "Free freemap volume %d\n", volume->vol_no);
532
533         for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
534              phys_offset < aligned_vol_free_end;
535              phys_offset += HAMMER_BLOCKMAP_LAYER2) {
536                 layer1_offset = freemap->phys_offset +
537                                 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
538                 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
539                 if (error)
540                         goto end;
541                 KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
542
543                 for (block_offset = 0;
544                      block_offset < HAMMER_BLOCKMAP_LAYER2;
545                      block_offset += HAMMER_BIGBLOCK_SIZE) {
546                         layer2_offset = layer1->phys_offset +
547                                         HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
548                         layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
549                         if (error)
550                                 goto end;
551
552                         switch (layer2->zone) {
553                         case HAMMER_ZONE_UNDO_INDEX:
554                                 KKASSERT(0);
555                         case HAMMER_ZONE_FREEMAP_INDEX:
556                         case HAMMER_ZONE_UNAVAIL_INDEX:
557                                 continue;
558                         default:
559                                 KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
560                                 if (layer2->append_off == 0 &&
561                                     layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
562                                         continue;
563                                 break;
564                         }
565                         return EBUSY;  /* Not empty */
566                 }
567         }
568
569         for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
570              phys_offset < aligned_vol_free_end;
571              phys_offset += HAMMER_BLOCKMAP_LAYER2) {
572                 layer1_offset = freemap->phys_offset +
573                                 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
574                 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
575                 if (error)
576                         goto end;
577                 KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);
578
579                 for (block_offset = 0;
580                      block_offset < HAMMER_BLOCKMAP_LAYER2;
581                      block_offset += HAMMER_BIGBLOCK_SIZE) {
582                         layer2_offset = layer1->phys_offset +
583                                         HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
584                         layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
585                         if (error)
586                                 goto end;
587
588                         switch (layer2->zone) {
589                         case HAMMER_ZONE_UNDO_INDEX:
590                                 KKASSERT(0);
591                         default:
592                                 KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
593                                 hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
594                                 bzero(layer2, sizeof(*layer2));
595                                 hammer_modify_buffer_done(buffer2);
596                                 break;
597                         }
598                 }
599
600                 hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
601                 bzero(layer1, sizeof(*layer1));
602                 layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
603                 hammer_crc_set_layer1(hmp->version, layer1);
604                 hammer_modify_buffer_done(buffer1);
605         }
606
607 end:
608         if (buffer1)
609                 hammer_rel_buffer(buffer1, 0);
610         if (buffer2)
611                 hammer_rel_buffer(buffer2, 0);
612
613         return error;
614 }
615
616 static int
617 hammer_format_volume_header(hammer_mount_t hmp,
618         struct hammer_ioc_volume *ioc,
619         hammer_volume_ondisk_t ondisk,
620         int vol_no)
621 {
622         hammer_volume_ondisk_t root_ondisk;
623         int64_t vol_alloc;
624
625         KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
626
627         /*
628          * Just copy from the root volume header.
629          */
630         root_ondisk = hmp->rootvol->ondisk;
631         bzero(ondisk, sizeof(struct hammer_volume_ondisk));
632         ondisk->vol_fsid = root_ondisk->vol_fsid;
633         ondisk->vol_fstype = root_ondisk->vol_fstype;
634         ksnprintf(ondisk->vol_label, sizeof(ondisk->vol_label), "%s",
635                 root_ondisk->vol_label);
636         ondisk->vol_version = root_ondisk->vol_version;
637         ondisk->vol_rootvol = root_ondisk->vol_no;
638         ondisk->vol_signature = root_ondisk->vol_signature;
639
640         KKASSERT(ondisk->vol_rootvol == HAMMER_ROOT_VOLNO);
641         KKASSERT(ondisk->vol_signature == HAMMER_FSBUF_VOLUME);
642
643         /*
644          * Assign the new vol_no and vol_count.
645          */
646         ondisk->vol_no = vol_no;
647         ondisk->vol_count = root_ondisk->vol_count + 1;
648
649         /*
650          * Reserve space for (future) header junk.
651          */
652         vol_alloc = root_ondisk->vol_bot_beg;
653         ondisk->vol_bot_beg = vol_alloc;
654         vol_alloc += ioc->boot_area_size;
655         ondisk->vol_mem_beg = vol_alloc;
656         vol_alloc += ioc->memory_log_size;
657
658         /*
659          * The remaining area is the zone 2 buffer allocation area.
660          */
661         ondisk->vol_buf_beg = vol_alloc;
662         ondisk->vol_buf_end = ioc->vol_size & ~(int64_t)HAMMER_BUFMASK;
663
664         if (HAMMER_VOL_BUF_SIZE(ondisk) < 0) { /* int64_t */
665                 hmkprintf(hmp, "volume %d is too small to hold the volume header\n",
666                         ondisk->vol_no);
667                 return(EFTYPE);
668         }
669
670         return(0);
671 }
672
673 static int
674 hammer_update_volumes_header(hammer_transaction_t trans,
675         int64_t total_bigblocks, int64_t empty_bigblocks)
676 {
677         hammer_mount_t hmp = trans->hmp;
678         struct mount *mp = hmp->mp;
679         hammer_volume_t volume;
680         int vol_no;
681         int error = 0;
682
683         /*
684          * Set each volume's new value of the vol_count field.
685          */
686         HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
687                 volume = hammer_get_volume(hmp, vol_no, &error);
688                 KKASSERT(volume != NULL && error == 0);
689                 hammer_modify_volume_field(trans, volume, vol_count);
690                 volume->ondisk->vol_count = hmp->nvolumes;
691                 hammer_modify_volume_done(volume);
692
693                 /*
694                  * Only changes to the header of the root volume
695                  * are automatically flushed to disk. For all
696                  * other volumes that we modify we do it here.
697                  *
698                  * No interlock is needed, volume buffers are not
699                  * messed with by bioops.
700                  */
701                 if (volume != trans->rootvol && volume->io.modified) {
702                         hammer_crc_set_volume(hmp->version, volume->ondisk);
703                         hammer_io_flush(&volume->io, 0);
704                 }
705
706                 hammer_rel_volume(volume, 0);
707         }
708
709         /*
710          * Update the total number of big-blocks.
711          */
712         hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks);
713         trans->rootvol->ondisk->vol0_stat_bigblocks += total_bigblocks;
714         hammer_modify_volume_done(trans->rootvol);
715
716         /*
717          * Big-block count changed so recompute the total number of blocks.
718          */
719         mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
720                                 HAMMER_BUFFERS_PER_BIGBLOCK;
721         mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
722                                 HAMMER_BUFFERS_PER_BIGBLOCK;
723
724         /*
725          * Update the total number of free big-blocks.
726          */
727         hammer_modify_volume_field(trans, trans->rootvol,
728                 vol0_stat_freebigblocks);
729         trans->rootvol->ondisk->vol0_stat_freebigblocks += empty_bigblocks;
730         hammer_modify_volume_done(trans->rootvol);
731
732         /*
733          * Update the copy in hmp.
734          */
735         hmp->copy_stat_freebigblocks =
736                 trans->rootvol->ondisk->vol0_stat_freebigblocks;
737
738         return(error);
739 }
740
741 /*
742  * Count total big-blocks and empty big-blocks within the volume.
743  * The volume must be a non-root volume.
744  *
745  * Note that total big-blocks doesn't include big-blocks for layer2
746  * (and obviously layer1 and undomap).  This is requirement of the
747  * volume header and this function is to retrieve that information.
748  */
749 static int
750 hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
751         int64_t *total_bigblocks, int64_t *empty_bigblocks)
752 {
753         hammer_volume_ondisk_t ondisk;
754         hammer_blockmap_t freemap;
755         hammer_off_t phys_offset;
756         hammer_off_t block_offset;
757         hammer_off_t layer1_offset;
758         hammer_off_t layer2_offset;
759         hammer_off_t vol_free_end;
760         hammer_off_t aligned_vol_free_end;
761         hammer_blockmap_layer1_t layer1;
762         hammer_blockmap_layer2_t layer2;
763         hammer_buffer_t buffer1 = NULL;
764         hammer_buffer_t buffer2 = NULL;
765         int64_t vol_buf_size;
766         int64_t total = 0;
767         int64_t empty = 0;
768         int error = 0;
769
770         KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);
771
772         *total_bigblocks = 0;   /* avoid gcc warnings */
773         *empty_bigblocks = 0;   /* avoid gcc warnings */
774
775         ondisk = volume->ondisk;
776         vol_buf_size = HAMMER_VOL_BUF_SIZE(ondisk);
777         KKASSERT((vol_buf_size & ~HAMMER_OFF_SHORT_MASK) == 0);
778         vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
779                         vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
780         aligned_vol_free_end = HAMMER_BLOCKMAP_LAYER2_DOALIGN(vol_free_end);
781
782         freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
783
784         for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
785              phys_offset < aligned_vol_free_end;
786              phys_offset += HAMMER_BLOCKMAP_LAYER2) {
787                 layer1_offset = freemap->phys_offset +
788                                 HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
789                 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
790                 if (error)
791                         goto end;
792
793                 for (block_offset = 0;
794                      block_offset < HAMMER_BLOCKMAP_LAYER2;
795                      block_offset += HAMMER_BIGBLOCK_SIZE) {
796                         layer2_offset = layer1->phys_offset +
797                                         HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
798                         layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
799                         if (error)
800                                 goto end;
801
802                         switch (layer2->zone) {
803                         case HAMMER_ZONE_UNDO_INDEX:
804                                 KKASSERT(0);
805                         case HAMMER_ZONE_FREEMAP_INDEX:
806                         case HAMMER_ZONE_UNAVAIL_INDEX:
807                                 continue;
808                         default:
809                                 KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
810                                 total++;
811                                 if (layer2->append_off == 0 &&
812                                     layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
813                                         empty++;
814                                 break;
815                         }
816                 }
817         }
818
819         hmkprintf(hmp, "big-blocks total=%jd empty=%jd\n", total, empty);
820         *total_bigblocks = total;
821         *empty_bigblocks = empty;
822 end:
823         if (buffer1)
824                 hammer_rel_buffer(buffer1, 0);
825         if (buffer2)
826                 hammer_rel_buffer(buffer2, 0);
827
828         return error;
829 }