sys/vfs/hammer: Temporary fix for kernel panic on volume-del
sys/vfs/hammer/hammer_volume.c
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "hammer.h"

static int
hammer_format_volume_header(struct hammer_mount *hmp,
        struct hammer_volume_ondisk *ondisk,
        const char *vol_name, int vol_no, int vol_count,
        int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_update_volumes_header(hammer_transaction_t trans,
        int64_t total_bigblocks, int64_t empty_bigblocks);

static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);

static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
        int64_t *total_bigblocks, int64_t *empty_bigblocks);

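/*
 * Add a volume to the filesystem (hammer(8) volume-add).
 *
 * The new volume gets a freshly formatted volume header and freemap and
 * is then accounted for in every volume header via
 * hammer_update_volumes_header().
 */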
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_volume *ioc)
{
        struct hammer_mount *hmp = trans->hmp;
        struct mount *mp = hmp->mp;
        struct hammer_volume_ondisk ondisk;
        hammer_volume_t volume;
        int64_t total_bigblocks, empty_bigblocks;
        int free_vol_no = 0;
        int error;

        if (mp->mnt_flag & MNT_RDONLY) {
                hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
                return (EINVAL);
        }

        if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
                hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
                return (EINVAL);
        }

        if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
                hmkprintf(hmp, "Another volume operation is in progress!\n");
                return (EAGAIN);
        }

        /*
         * Find an unused volume number.
         */
        while (free_vol_no < HAMMER_MAX_VOLUMES &&
                HAMMER_VOLUME_NUMBER_IS_SET(hmp, free_vol_no)) {
                ++free_vol_no;
        }
        if (free_vol_no >= HAMMER_MAX_VOLUMES) {
                hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
                error = EINVAL;
                goto end;
        }

        error = hammer_format_volume_header(
                hmp,
                &ondisk,
                hmp->rootvol->ondisk->vol_name,
                free_vol_no,
                hmp->nvolumes+1,
                ioc->vol_size,
                ioc->boot_area_size,
                ioc->mem_area_size);
        if (error)
                goto end;

        error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
        if (error)
                goto end;

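        /*
         * Format the new volume's freemap and update the volume headers
         * with the sync lock held shared and the blockmap lock held
         * exclusively, interlocking against the flusher and other
         * blockmap operations.
         */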
        hammer_sync_lock_sh(trans);
        hammer_lock_ex(&hmp->blkmap_lock);

        volume = hammer_get_volume(hmp, free_vol_no, &error);
        KKASSERT(volume != NULL && error == 0);

        error = hammer_format_freemap(trans, volume);
        KKASSERT(error == 0);

        error = hammer_count_bigblocks(hmp, volume,
                        &total_bigblocks, &empty_bigblocks);
        KKASSERT(error == 0);
        KKASSERT(total_bigblocks == empty_bigblocks);

        hammer_rel_volume(volume, 0);

        ++hmp->nvolumes;
        error = hammer_update_volumes_header(trans,
                        total_bigblocks, empty_bigblocks);
        KKASSERT(error == 0);

        hammer_unlock(&hmp->blkmap_lock);
        hammer_sync_unlock(trans);

        KKASSERT(error == 0);
end:
        hammer_unlock(&hmp->volume_lock);
        if (error)
                hmkprintf(hmp, "An error occurred: %d\n", error);
        return (error);
}


/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_volume *ioc)
{
        struct hammer_mount *hmp = trans->hmp;
        struct mount *mp = hmp->mp;
        struct hammer_volume_ondisk ondisk;
        hammer_volume_t volume;
        int64_t total_bigblocks, empty_bigblocks;
        int vol_no;
        int error = 0;

        if (mp->mnt_flag & MNT_RDONLY) {
                hmkprintf(hmp, "Cannot delete volume from read-only HAMMER filesystem\n");
                return (EINVAL);
        }

        if (hmp->nvolumes <= 1) {
                hmkprintf(hmp, "No HAMMER volume to delete\n");
                return (EINVAL);
        }

        if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
                hmkprintf(hmp, "Another volume operation is in progress!\n");
                return (EAGAIN);
        }

        /*
         * Find the volume by matching its device name.
         */
        volume = NULL;
        HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
                volume = hammer_get_volume(hmp, vol_no, &error);
                KKASSERT(volume != NULL && error == 0);
                if (strcmp(volume->vol_name, ioc->device_name) == 0) {
                        break;
                }
                hammer_rel_volume(volume, 0);
                volume = NULL;
        }

        if (volume == NULL) {
                hmkprintf(hmp, "Couldn't find volume\n");
                error = EINVAL;
                goto end;
        }

        if (volume == trans->rootvol) {
                hmkprintf(hmp, "Cannot remove root-volume\n");
                hammer_rel_volume(volume, 0);
                error = EINVAL;
                goto end;
        }

        /*
         * Reblock filesystem if the volume is not empty
         */
        hmp->volume_to_remove = volume->vol_no;

        error = hammer_count_bigblocks(hmp, volume,
                        &total_bigblocks, &empty_bigblocks);
        KKASSERT(error == 0);

        if (total_bigblocks == empty_bigblocks) {
                hmkprintf(hmp, "%s is already empty\n", volume->vol_name);
        } else {
                error = hammer_do_reblock(trans, ip);
                if (error) {
                        hmp->volume_to_remove = -1;
                        hammer_rel_volume(volume, 0);
                        goto end;
                }
        }

        /*
         * Sync filesystem
         */
        hammer_flush_dirty(hmp, 30);

        hammer_sync_lock_sh(trans);
        hammer_lock_ex(&hmp->blkmap_lock);

        error = hammer_count_bigblocks(hmp, volume,
                        &total_bigblocks, &empty_bigblocks);
        KKASSERT(error == 0);

        error = hammer_free_freemap(trans, volume);
        if (error) {
                hmkprintf(hmp, "Failed to free volume: ");
                if (error == EBUSY)
                        kprintf("Volume %d not empty\n", volume->vol_no);
                else
                        kprintf("%d\n", error);
                hmp->volume_to_remove = -1;
                hammer_rel_volume(volume, 0);
                goto end1;
        }
        hammer_rel_volume(volume, 0);

        /*
         * XXX: Temporary workaround for the volume-del panic reported in
         * http://lists.dragonflybsd.org/pipermail/kernel/2015-August/175027.html
         * Temporarily drop the locks and run the flusher several times so
         * pending meta-data reaches the media before the volume's buffers
         * are unloaded below.
         */
        hammer_unlock(&hmp->blkmap_lock);
        hammer_sync_unlock(trans);
        hammer_flusher_sync(hmp); /* 1 */
        hammer_flusher_sync(hmp); /* 2 */
        hammer_flusher_sync(hmp); /* 3 */
        hammer_sync_lock_sh(trans);
        hammer_lock_ex(&hmp->blkmap_lock);

        /*
         * Unload buffers
         */
        RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
                hammer_unload_buffer, volume);

        bzero(&ondisk, sizeof(ondisk));
        error = hammer_unload_volume(volume, &ondisk);
        if (error == -1) {
                hmkprintf(hmp, "Failed to unload volume\n");
                goto end1;
        }

        --hmp->nvolumes;
        error = hammer_update_volumes_header(trans,
                        -total_bigblocks, -empty_bigblocks);
        KKASSERT(error == 0);
        hmp->volume_to_remove = -1;

end1:
        hammer_unlock(&hmp->blkmap_lock);
        hammer_sync_unlock(trans);

end:
        hammer_unlock(&hmp->volume_lock);
        if (error)
                hmkprintf(hmp, "An error occurred: %d\n", error);
        return (error);
}


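/*
 * List volumes: copy the device name of each installed volume out to the
 * caller-supplied array, up to ioc->nvols entries, and return the actual
 * count in ioc->nvols.
 */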
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
    struct hammer_ioc_volume_list *ioc)
{
        struct hammer_mount *hmp = trans->hmp;
        hammer_volume_t volume;
        int error = 0;
        int i, len, cnt = 0;

        if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
                hmkprintf(hmp, "Another volume operation is in progress!\n");
                return (EAGAIN);
        }

        HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
                if (cnt >= ioc->nvols)
                        break;
                volume = hammer_get_volume(hmp, i, &error);
                KKASSERT(volume != NULL && error == 0);

                len = strlen(volume->vol_name) + 1;
                KKASSERT(len <= MAXPATHLEN);

                error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
                                len);
                hammer_rel_volume(volume, 0);
                if (error)
                        goto end;
                cnt++;
        }
        ioc->nvols = cnt;

end:
        hammer_unlock(&hmp->volume_lock);
        return (error);
}

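/*
 * Reblock the entire filesystem (all PFSs, free_level 0) so that every
 * in-use big-block on the volume being removed is relocated onto the
 * remaining volumes.
 */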
static
int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
{
        struct hammer_mount *hmp = trans->hmp;
        int error;
        int vol_no;

        struct hammer_ioc_reblock reblock;
        bzero(&reblock, sizeof(reblock));

        vol_no = trans->hmp->volume_to_remove;
        KKASSERT(vol_no != -1);

        reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
        reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
        reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
        reblock.key_end.obj_id = HAMMER_MAX_OBJID;
        reblock.head.flags = HAMMER_IOC_DO_FLAGS;
        reblock.free_level = 0; /* reblock all big-blocks */
        reblock.allpfs = 1;     /* reblock all PFS */
        reblock.vol_no = vol_no;

        hmkprintf(hmp, "reblock started\n");
        error = hammer_ioc_reblock(trans, ip, &reblock);

        if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
                error = EINTR;
        }

        if (error) {
                if (error == EINTR) {
                        hmkprintf(hmp, "reblock was interrupted\n");
                } else {
                        hmkprintf(hmp, "reblock failed: %d\n", error);
                }
                return(error);
        }

        return(0);
}

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
        struct hammer_mount *hmp = trans->hmp;
        struct hammer_volume_ondisk *ondisk;
        hammer_blockmap_t freemap;
        hammer_off_t alloc_offset;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t vol_free_end;
        hammer_off_t aligned_vol_free_end;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        int64_t vol_buf_size;
        int64_t layer1_count = 0;
        int error = 0;

        KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

        ondisk = volume->ondisk;
        vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
        vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
                        vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
        aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                        & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        alloc_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);

        hmkprintf(hmp, "Initialize freemap volume %d\n", volume->vol_no);

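        /*
         * Pass 1: allocate a layer1 entry for every HAMMER_BLOCKMAP_LAYER2
         * sized region the new volume covers.  Each entry gets one
         * big-block, carved from the start of the volume, to hold its
         * layer2 array.
         */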
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
                if (error)
                        goto end;
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
                        bzero(layer1, sizeof(*layer1));
                        layer1->phys_offset = alloc_offset;
                        layer1->blocks_free = 0;
                        layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                        hammer_modify_buffer_done(buffer1);
                        alloc_offset += HAMMER_BIGBLOCK_SIZE;
                }
        }

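        /*
         * Pass 2: initialize the layer2 entries.  Big-blocks consumed by
         * the freemap itself (below alloc_offset) are tagged with the
         * freemap zone, big-blocks past the end of the volume are marked
         * unavailable, and everything in between is marked free and
         * counted into layer1->blocks_free.
         */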
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_count = 0;
                layer1_offset = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
                if (error)
                        goto end;
                KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        layer2_offset = layer1->phys_offset +
                                        HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
                        if (error)
                                goto end;

                        hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
                        bzero(layer2, sizeof(*layer2));

                        if (phys_offset + block_offset < alloc_offset) {
                                layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        } else if (phys_offset + block_offset < vol_free_end) {
                                layer2->zone = 0;
                                layer2->append_off = 0;
                                layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
                                ++layer1_count;
                        } else {
                                layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                                layer2->append_off = HAMMER_BIGBLOCK_SIZE;
                                layer2->bytes_free = 0;
                        }

                        layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                        hammer_modify_buffer_done(buffer2);
                }

                hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
                layer1->blocks_free += layer1_count;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
        }

end:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);

        return error;
}

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
        struct hammer_mount *hmp = trans->hmp;
        struct hammer_volume_ondisk *ondisk;
        hammer_blockmap_t freemap;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t vol_free_end;
        hammer_off_t aligned_vol_free_end;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        int64_t vol_buf_size;
        int64_t layer1_count = 0;
        int error = 0;

        KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

        ondisk = volume->ondisk;
        vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
        vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
                        vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
        aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                        & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        hmkprintf(hmp, "Free freemap volume %d\n", volume->vol_no);

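        /*
         * Pass 1: check that every big-block on the volume is either part
         * of the freemap/unavailable area or completely free.  If any
         * big-block is still in use the volume cannot be removed.
         */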
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_count = 0;
                layer1_offset = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
                if (error)
                        goto end;
                KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        layer2_offset = layer1->phys_offset +
                                        HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
                        if (error)
                                goto end;

                        switch (layer2->zone) {
                        case HAMMER_ZONE_UNDO_INDEX:
                                KKASSERT(0);
                        case HAMMER_ZONE_FREEMAP_INDEX:
                        case HAMMER_ZONE_UNAVAIL_INDEX:
                                continue;
                        default:
                                KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
                                if (layer2->append_off == 0 &&
                                    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
                                        continue;
                                break;
                        }
                        /* Big-block still in use; release buffers via end */
                        error = EBUSY;
                        goto end;
                }
        }

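        /*
         * Pass 2: the volume is empty, so mark every layer1 entry covering
         * it as unavailable, removing the volume's big-blocks from the
         * freemap.
         */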
        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_count = 0;
                layer1_offset = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
                if (error)
                        goto end;
                KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

                hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(buffer1);
        }

end:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);

        return error;
}

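/*
 * Format the in-memory volume header for a new volume.  Invariant fields
 * (fsid, fstype, version, root volume number) are copied from the existing
 * root volume; the boot and memory area sizes are taken from the caller.
 */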
static int
hammer_format_volume_header(struct hammer_mount *hmp,
        struct hammer_volume_ondisk *ondisk,
        const char *vol_name, int vol_no, int vol_count,
        int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
        int64_t vol_alloc;

        KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));

        bzero(ondisk, sizeof(struct hammer_volume_ondisk));
        ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
        ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
        ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
        ondisk->vol_fsid = hmp->fsid;
        ondisk->vol_rootvol = hmp->rootvol->vol_no;
        ondisk->vol_no = vol_no;
        ondisk->vol_count = vol_count;
        ondisk->vol_version = hmp->version;

        /*
         * Reserve space for (future) header junk.  The volume-relative
         * offsets are copied from the existing root volume.
         */
        vol_alloc = hmp->rootvol->ondisk->vol_bot_beg;
        ondisk->vol_bot_beg = vol_alloc;
        vol_alloc += boot_area_size;
        ondisk->vol_mem_beg = vol_alloc;
        vol_alloc += mem_area_size;

        /*
         * The remaining area is the zone 2 buffer allocation area.
         */
        ondisk->vol_buf_beg = vol_alloc;
        ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

        if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
                hmkprintf(hmp, "volume %d %s is too small to hold the volume header\n",
                     ondisk->vol_no, ondisk->vol_name);
                return(EFTYPE);
        }

        ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
                              HAMMER_BUFSIZE;
        ondisk->vol_blocksize = HAMMER_BUFSIZE;
        return(0);
}

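/*
 * Propagate the new volume count to every volume header and adjust the
 * root volume's big-block statistics by the given deltas (positive when a
 * volume is added, negative when one is deleted).
 */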
static int
hammer_update_volumes_header(hammer_transaction_t trans,
        int64_t total_bigblocks, int64_t empty_bigblocks)
{
        struct hammer_mount *hmp = trans->hmp;
        struct mount *mp = hmp->mp;
        hammer_volume_t volume;
        int vol_no;
        int error = 0;

        /*
         * Set each volume's new value of the vol_count field.
         */
        HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
                volume = hammer_get_volume(hmp, vol_no, &error);
                KKASSERT(volume != NULL && error == 0);
                hammer_modify_volume_field(trans, volume, vol_count);
                volume->ondisk->vol_count = hmp->nvolumes;
                hammer_modify_volume_done(volume);

                /*
                 * Only changes to the root volume's header are flushed
                 * to disk automatically; for any other volume we modify,
                 * the flush is done here.
                 *
                 * No interlock is needed; volume buffers are not
                 * touched by bioops.
                 */
                if (volume != trans->rootvol && volume->io.modified) {
                        hammer_crc_set_volume(volume->ondisk);
                        hammer_io_flush(&volume->io, 0);
                }

                hammer_rel_volume(volume, 0);
        }

        /*
         * Update the total number of big-blocks.
         */
        hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks);
        trans->rootvol->ondisk->vol0_stat_bigblocks += total_bigblocks;
        hammer_modify_volume_done(trans->rootvol);

        /*
         * Big-block count changed so recompute the total number of blocks.
         */
        mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
                                HAMMER_BUFFERS_PER_BIGBLOCK;
        mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
                                HAMMER_BUFFERS_PER_BIGBLOCK;

        /*
         * Update the total number of free big-blocks.
         */
        hammer_modify_volume_field(trans, trans->rootvol,
                vol0_stat_freebigblocks);
        trans->rootvol->ondisk->vol0_stat_freebigblocks += empty_bigblocks;
        hammer_modify_volume_done(trans->rootvol);

        /*
         * Update the copy in hmp.
         */
        hmp->copy_stat_freebigblocks =
                trans->rootvol->ondisk->vol0_stat_freebigblocks;

        return(error);
}

/*
 * Count the total and empty big-blocks within the volume.
 * The volume must be a non-root volume.
 *
 * Note that the total does not include big-blocks used for layer2
 * (nor, obviously, layer1 and the undo map).  That is what the volume
 * header expects, and this function exists to retrieve that information.
 */
static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
        int64_t *total_bigblocks, int64_t *empty_bigblocks)
{
        struct hammer_volume_ondisk *ondisk;
        hammer_blockmap_t freemap;
        hammer_off_t phys_offset;
        hammer_off_t block_offset;
        hammer_off_t layer1_offset;
        hammer_off_t layer2_offset;
        hammer_off_t vol_free_end;
        hammer_off_t aligned_vol_free_end;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;
        hammer_buffer_t buffer1 = NULL;
        hammer_buffer_t buffer2 = NULL;
        int64_t vol_buf_size;
        int64_t total = 0;
        int64_t empty = 0;
        int error = 0;

        KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

        ondisk = volume->ondisk;
        vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
        vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
                        vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
        aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
                        & ~HAMMER_BLOCKMAP_LAYER2_MASK;

        freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

        for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
             phys_offset < aligned_vol_free_end;
             phys_offset += HAMMER_BLOCKMAP_LAYER2) {
                layer1_offset = freemap->phys_offset +
                                HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
                layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
                if (error)
                        goto end;

                for (block_offset = 0;
                     block_offset < HAMMER_BLOCKMAP_LAYER2;
                     block_offset += HAMMER_BIGBLOCK_SIZE) {
                        layer2_offset = layer1->phys_offset +
                                        HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
                        layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
                        if (error)
                                goto end;

                        switch (layer2->zone) {
                        case HAMMER_ZONE_UNDO_INDEX:
                                KKASSERT(0);
                        case HAMMER_ZONE_FREEMAP_INDEX:
                        case HAMMER_ZONE_UNAVAIL_INDEX:
                                continue;
                        default:
                                KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
                                total++;
                                if (layer2->append_off == 0 &&
                                    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
                                        empty++;
                                break;
                        }
                }
        }

        hmkprintf(hmp, "big-blocks total=%jd empty=%jd\n",
                (intmax_t)total, (intmax_t)empty);
        *total_bigblocks = total;
        *empty_bigblocks = empty;
end:
        if (buffer1)
                hammer_rel_buffer(buffer1, 0);
        if (buffer2)
                hammer_rel_buffer(buffer2, 0);

        return error;
}