/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"

static int
hammer_format_volume_header(hammer_mount_t hmp,
	struct hammer_ioc_volume *ioc,
	struct hammer_volume_ondisk *ondisk,
	int vol_no);
static int
hammer_update_volumes_header(hammer_transaction_t trans,
	int64_t total_bigblocks, int64_t empty_bigblocks);
static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip);
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume);
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume);
static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
	int64_t *total_bigblocks, int64_t *empty_bigblocks);
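
/*
 * Add a volume to an existing HAMMER filesystem.  The new volume's
 * header and freemap are formatted, the volume is installed, and the
 * headers of all volumes are updated to reflect the new volume count
 * and big-block totals.
 */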
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int free_vol_no = 0;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot add volume to read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * Find an unused volume number.
	 */
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
		HAMMER_VOLUME_NUMBER_IS_SET(hmp, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		hmkprintf(hmp, "Max number of HAMMER volumes exceeded\n");
		error = EINVAL;
		goto end;
	}

	error = hammer_format_volume_header(hmp, ioc, &ondisk, free_vol_no);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, ioc->device_name, NULL, &ondisk);
	if (error)
		goto end;

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	error =	hammer_format_freemap(trans, volume);
	KKASSERT(error == 0);

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);
	KKASSERT(total_bigblocks == empty_bigblocks);

	hammer_rel_volume(volume, 0);

	++hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			total_bigblocks, empty_bigblocks);
	KKASSERT(error == 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

	KKASSERT(error == 0);
end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}
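
/*
 * Remove a volume from the filesystem.  The volume must be empty (or
 * be reblocked empty first), after which its freemap is released and
 * the remaining volume headers are updated.
 */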
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	struct hammer_volume_ondisk ondisk;
	hammer_volume_t volume;
	int64_t total_bigblocks, empty_bigblocks;
	int vol_no;
	int error = 0;

	if (mp->mnt_flag & MNT_RDONLY) {
		hmkprintf(hmp, "Cannot del volume from read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes <= 1) {
		hmkprintf(hmp, "No HAMMER volume to delete\n");
		return (EINVAL);
	}

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	/*
	 * find volume by volname
	 */
	volume = NULL;
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		if (strcmp(volume->vol_name, ioc->device_name) == 0) {
			break;
		}
		hammer_rel_volume(volume, 0);
		volume = NULL;
	}

	if (volume == NULL) {
		hmkprintf(hmp, "Couldn't find volume\n");
		error = EINVAL;
		goto end;
	}

	if (volume == trans->rootvol) {
		hmkprintf(hmp, "Cannot remove root-volume\n");
		hammer_rel_volume(volume, 0);
		error = EINVAL;
		goto end;
	}

	/*
	 * Reblock filesystem if the volume is not empty
	 */
	hmp->volume_to_remove = volume->vol_no;

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);

	if (total_bigblocks == empty_bigblocks) {
		hmkprintf(hmp, "%s is already empty\n", volume->vol_name);
	} else if (ioc->flag & HAMMER_IOC_VOLUME_REBLOCK) {
		error = hammer_do_reblock(trans, ip);
		if (error) {
			hmp->volume_to_remove = -1;
			hammer_rel_volume(volume, 0);
			goto end;
		}
	} else {
		hmkprintf(hmp, "%s is not empty\n", volume->vol_name);
		hammer_rel_volume(volume, 0);
		error = EBUSY;
		goto end;
	}

	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	error = hammer_count_bigblocks(hmp, volume,
			&total_bigblocks, &empty_bigblocks);
	KKASSERT(error == 0);

	error = hammer_free_freemap(trans, volume);
	if (error) {
		hmkprintf(hmp, "Failed to free volume: ");
		if (error == EBUSY)
			kprintf("Volume %d not empty\n", volume->vol_no);
		else
			kprintf("%d\n", error);
		hmp->volume_to_remove = -1;
		hammer_rel_volume(volume, 0);
		goto end1;
	}
	hammer_rel_volume(volume, 0);

	/*
	 * XXX: Temporary solution for
	 * http://lists.dragonflybsd.org/pipermail/kernel/2015-August/175027.html
	 */
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);
	hammer_flusher_sync(hmp); /* 1 */
	hammer_flusher_sync(hmp); /* 2 */
	hammer_flusher_sync(hmp); /* 3 */
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	/*
	 * Unload buffers
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, volume);

	bzero(&ondisk, sizeof(ondisk));
	error = hammer_unload_volume(volume, &ondisk);
	if (error) {
		hmkprintf(hmp, "Failed to unload volume\n");
		goto end1;
	}

	--hmp->nvolumes;
	error = hammer_update_volumes_header(trans,
			-total_bigblocks, -empty_bigblocks);
	KKASSERT(error == 0);
	hmp->volume_to_remove = -1;

end1:
	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	hammer_unlock(&hmp->volume_lock);
	if (error)
		hmkprintf(hmp, "An error occurred: %d\n", error);
	return (error);
}
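
/*
 * Iterate over all installed volumes and copy each volume number and
 * device name out to the userland ioctl buffer.
 */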
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_volume_list *ioc)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_volume_t volume;
	int error = 0;
	int i, len, cnt = 0;

	if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
		hmkprintf(hmp, "Another volume operation is in progress!\n");
		return (EAGAIN);
	}

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		if (cnt >= ioc->nvols)
			break;
		volume = hammer_get_volume(hmp, i, &error);
		KKASSERT(volume != NULL && error == 0);

		len = strlen(volume->vol_name) + 1;
		KKASSERT(len <= MAXPATHLEN);

		ioc->vols[cnt].vol_no = volume->vol_no;
		error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
				len);
		hammer_rel_volume(volume, 0);
		if (error)
			goto end;
		cnt++;
	}

end:
	hammer_unlock(&hmp->volume_lock);
	return (error);
}
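
/*
 * Reblock the entire filesystem (all PFSs, all big-blocks) off the
 * volume being removed so that its big-blocks end up empty.
 */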
static int
hammer_do_reblock(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_mount *hmp = trans->hmp;
	int error;
	int vol_no;

	struct hammer_ioc_reblock reblock;
	bzero(&reblock, sizeof(reblock));

	vol_no = trans->hmp->volume_to_remove;
	KKASSERT(vol_no != -1);

	reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.head.flags = HAMMER_IOC_DO_FLAGS;
	reblock.free_level = 0;	/* reblock all big-blocks */
	reblock.allpfs = 1;	/* reblock all PFS */
	reblock.vol_no = vol_no;

	hmkprintf(hmp, "reblock started\n");
	error = hammer_ioc_reblock(trans, ip, &reblock);

	if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
		error = EINTR;
	}
	if (error) {
		if (error == EINTR) {
			hmkprintf(hmp, "reblock was interrupted\n");
		} else {
			hmkprintf(hmp, "reblock failed: %d\n", error);
		}
	}

	return (error);
}

/*
 * XXX This somehow needs to stop doing hammer_modify_buffer() for
 * layer2 entries.  In theory adding a large block device could
 * blow away the UNDO FIFO.  The best way is to format layer2 entries
 * in userspace without UNDO getting involved before the device is
 * safely added to the filesystem.  HAMMER has no interest in what
 * has happened to the device before it safely joins the filesystem.
 */
static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	struct hammer_mount *hmp = trans->hmp;
	struct hammer_volume_ondisk *ondisk;
	hammer_blockmap_t freemap;
	hammer_off_t alloc_offset;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t vol_free_end;
	hammer_off_t aligned_vol_free_end;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	int64_t vol_buf_size;
	int64_t layer1_count = 0;
	int error = 0;

	KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

	ondisk = volume->ondisk;
	vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
	vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
			vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
			& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	alloc_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);

	hmkprintf(hmp, "Initialize freemap volume %d\n", volume->vol_no);
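
	/*
	 * First pass: bootstrap the layer1 entries covering this volume
	 * by allocating big-blocks from the new volume itself to hold
	 * the layer2 arrays.
	 */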
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
			hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
			bzero(layer1, sizeof(*layer1));
			layer1->phys_offset = alloc_offset;
			layer1->blocks_free = 0;
			layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
			hammer_modify_buffer_done(buffer1);
			alloc_offset += HAMMER_BIGBLOCK_SIZE;
		}
	}
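
	/*
	 * Second pass: fill in the layer2 entries.  Big-blocks consumed
	 * by the layer2 bootstrap above are marked as freemap zone,
	 * big-blocks within the volume are marked free, and the alignment
	 * padding beyond the end of the volume is marked unavailable.
	 */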
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_count = 0;
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
			bzero(layer2, sizeof(*layer2));

			if (phys_offset + block_offset < alloc_offset) {
				layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			} else if (phys_offset + block_offset < vol_free_end) {
				layer2->zone = 0;
				layer2->append_off = 0;
				layer2->bytes_free = HAMMER_BIGBLOCK_SIZE;
				++layer1_count;
			} else {
				layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
				layer2->append_off = HAMMER_BIGBLOCK_SIZE;
				layer2->bytes_free = 0;
			}

			layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
			hammer_modify_buffer_done(buffer2);
		}

		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		layer1->blocks_free += layer1_count;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
	}

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}

/*
 * XXX This somehow needs to stop doing hammer_modify_buffer() for
 * layer2 entries.  In theory removing a large block device could
 * blow away the UNDO FIFO.  The best way is to erase layer2 entries
 * in userspace without UNDO getting involved after the device has
 * been safely removed from the filesystem.  HAMMER has no interest
 * in what happens to the device once it's safely removed.
 */
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume)
{
	struct hammer_mount *hmp = trans->hmp;
	struct hammer_volume_ondisk *ondisk;
	hammer_blockmap_t freemap;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t vol_free_end;
	hammer_off_t aligned_vol_free_end;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	int64_t vol_buf_size;
	int error = 0;

	KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

	ondisk = volume->ondisk;
	vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
	vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
			vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
			& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	hmkprintf(hmp, "Free freemap volume %d\n", volume->vol_no);
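
	/*
	 * First pass: verify the volume is completely empty.  Any layer2
	 * entry that is still in use (other than freemap/unavail
	 * bookkeeping blocks) aborts the removal with EBUSY.
	 */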
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			switch (layer2->zone) {
			case HAMMER_ZONE_UNDO_INDEX:
				KKASSERT(0);
			case HAMMER_ZONE_FREEMAP_INDEX:
			case HAMMER_ZONE_UNAVAIL_INDEX:
				continue;
			default:
				KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
				if (layer2->append_off == 0 &&
				    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
					continue;
				break;
			}
			return EBUSY;  /* Not empty */
		}
	}
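
	/*
	 * Second pass: the volume is empty, so clear out its layer2
	 * entries and mark the covering layer1 entries unavailable.
	 */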
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;
		KKASSERT(layer1->phys_offset != HAMMER_BLOCKMAP_UNAVAIL);

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			switch (layer2->zone) {
			case HAMMER_ZONE_UNDO_INDEX:
				KKASSERT(0);
			default:
				KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
				hammer_modify_buffer(trans, buffer2, layer2, sizeof(*layer2));
				bzero(layer2, sizeof(*layer2));
				hammer_modify_buffer_done(buffer2);
				break;
			}
		}

		hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
		bzero(layer1, sizeof(*layer1));
		layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
		layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
		hammer_modify_buffer_done(buffer1);
	}

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}
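
/*
 * Build the on-disk volume header for a new volume by cloning the
 * invariant fields from the root volume's header and then laying out
 * the boot, memory and big-block buffer areas for the new device.
 */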
static int
hammer_format_volume_header(hammer_mount_t hmp,
	struct hammer_ioc_volume *ioc,
	struct hammer_volume_ondisk *ondisk,
	int vol_no)
{
	struct hammer_volume_ondisk *root_ondisk;
	int64_t vol_alloc;

	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));

	/*
	 * Just copy from the root volume header.
	 */
	root_ondisk = hmp->rootvol->ondisk;
	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ondisk->vol_fsid = root_ondisk->vol_fsid;
	ondisk->vol_fstype = root_ondisk->vol_fstype;
	ksnprintf(ondisk->vol_label, sizeof(ondisk->vol_label), "%s",
		root_ondisk->vol_label);
	ondisk->vol_version = root_ondisk->vol_version;
	ondisk->vol_rootvol = root_ondisk->vol_no;
	ondisk->vol_signature = root_ondisk->vol_signature;

	KKASSERT(ondisk->vol_rootvol == HAMMER_ROOT_VOLNO);
	KKASSERT(ondisk->vol_signature == HAMMER_FSBUF_VOLUME);

	/*
	 * Assign the new vol_no and vol_count.
	 */
	ondisk->vol_no = vol_no;
	ondisk->vol_count = root_ondisk->vol_count + 1;

	/*
	 * Reserve space for (future) header junk, copy volume relative
	 * offset from the existing root volume.
	 */
	vol_alloc = root_ondisk->vol_bot_beg;
	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += ioc->boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += ioc->mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = ioc->vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		hmkprintf(hmp, "volume %d is too small to hold the volume header\n",
			ondisk->vol_no);
		return (EFTYPE);
	}

	return (0);
}
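
/*
 * Update all volume headers and the root volume's statistics after a
 * volume has been added or removed.  total_bigblocks and
 * empty_bigblocks are signed deltas (negative when a volume is
 * removed).
 */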
static int
hammer_update_volumes_header(hammer_transaction_t trans,
	int64_t total_bigblocks, int64_t empty_bigblocks)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	int vol_no;
	int error = 0;

	/*
	 * Set each volume's new value of the vol_count field.
	 */
	HAMMER_VOLUME_NUMBER_FOREACH(hmp, vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		KKASSERT(volume != NULL && error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);

		/*
		 * Only changes to the header of the root volume
		 * are automatically flushed to disk.  For all
		 * other volumes that we modify we do it here.
		 *
		 * No interlock is needed, volume buffers are not
		 * messed with by bioops.
		 */
		if (volume != trans->rootvol && volume->io.modified) {
			hammer_crc_set_volume(volume->ondisk);
			hammer_io_flush(&volume->io, 0);
		}

		hammer_rel_volume(volume, 0);
	}

	/*
	 * Update the total number of big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol, vol0_stat_bigblocks);
	trans->rootvol->ondisk->vol0_stat_bigblocks += total_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Big-block count changed so recompute the total number of blocks.
	 */
	mp->mnt_stat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;
	mp->mnt_vstat.f_blocks = trans->rootvol->ondisk->vol0_stat_bigblocks *
				HAMMER_BUFFERS_PER_BIGBLOCK;

	/*
	 * Update the total number of free big-blocks.
	 */
	hammer_modify_volume_field(trans, trans->rootvol,
		vol0_stat_freebigblocks);
	trans->rootvol->ondisk->vol0_stat_freebigblocks += empty_bigblocks;
	hammer_modify_volume_done(trans->rootvol);

	/*
	 * Update the copy in hmp.
	 */
	hmp->copy_stat_freebigblocks =
		trans->rootvol->ondisk->vol0_stat_freebigblocks;

	return (error);
}

/*
 * Count total big-blocks and empty big-blocks within the volume.
 * The volume must be a non-root volume.
 *
 * Note that total big-blocks doesn't include big-blocks for layer2
 * (and obviously layer1 and undomap).  This is a requirement of the
 * volume header and this function exists to retrieve that information.
 */
static int
hammer_count_bigblocks(hammer_mount_t hmp, hammer_volume_t volume,
	int64_t *total_bigblocks, int64_t *empty_bigblocks)
{
	struct hammer_volume_ondisk *ondisk;
	hammer_blockmap_t freemap;
	hammer_off_t phys_offset;
	hammer_off_t block_offset;
	hammer_off_t layer1_offset;
	hammer_off_t layer2_offset;
	hammer_off_t vol_free_end;
	hammer_off_t aligned_vol_free_end;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;
	hammer_buffer_t buffer1 = NULL;
	hammer_buffer_t buffer2 = NULL;
	int64_t vol_buf_size;
	int64_t total = 0;
	int64_t empty = 0;
	int error = 0;

	KKASSERT(volume->vol_no != HAMMER_ROOT_VOLNO);

	ondisk = volume->ondisk;
	vol_buf_size = ondisk->vol_buf_end - ondisk->vol_buf_beg;
	vol_free_end = HAMMER_ENCODE_RAW_BUFFER(ondisk->vol_no,
			vol_buf_size & ~HAMMER_BIGBLOCK_MASK64);
	aligned_vol_free_end = (vol_free_end + HAMMER_BLOCKMAP_LAYER2_MASK)
			& ~HAMMER_BLOCKMAP_LAYER2_MASK;

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_offset < aligned_vol_free_end;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {
		layer1_offset = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
		layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
		if (error)
			goto end;

		for (block_offset = 0;
		     block_offset < HAMMER_BLOCKMAP_LAYER2;
		     block_offset += HAMMER_BIGBLOCK_SIZE) {
			layer2_offset = layer1->phys_offset +
					HAMMER_BLOCKMAP_LAYER2_OFFSET(block_offset);
			layer2 = hammer_bread(hmp, layer2_offset, &error, &buffer2);
			if (error)
				goto end;

			switch (layer2->zone) {
			case HAMMER_ZONE_UNDO_INDEX:
				KKASSERT(0);
			case HAMMER_ZONE_FREEMAP_INDEX:
			case HAMMER_ZONE_UNAVAIL_INDEX:
				continue;
			default:
				KKASSERT(phys_offset + block_offset < aligned_vol_free_end);
				total++;
				if (layer2->append_off == 0 &&
				    layer2->bytes_free == HAMMER_BIGBLOCK_SIZE)
					empty++;
				break;
			}
		}
	}

	hmkprintf(hmp, "big-blocks total=%jd empty=%jd\n", total, empty);
	*total_bigblocks = total;
	*empty_bigblocks = empty;

end:
	if (buffer1)
		hammer_rel_buffer(buffer1, 0);
	if (buffer2)
		hammer_rel_buffer(buffer2, 0);

	return error;
}