Fix more wrong sizeof() usages, part 1/x
[dragonfly.git] / sys / vfs / hammer / hammer_volume.c
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>

#include <sys/buf2.h>

static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly);

static void
hammer_close_device(struct vnode **devvpp, int ronly);

static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
        const char *vol_name, int vol_no, int vol_count,
        int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_clear_volume_header(struct vnode *devvp);

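/*
 * Aggregate counters filled in by the freemap format/free callbacks:
 * total_bigblocks and total_free_bigblocks accumulate per-volume totals,
 * while counter tracks the running number of free bigblocks under the
 * layer1 entry currently being scanned (it is folded into that entry's
 * blocks_free field and reset whenever a layer1 entry is emitted).
 */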
struct bigblock_stat {
        uint64_t total_bigblocks;
        uint64_t total_free_bigblocks;
        uint64_t counter;
};

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
        struct bigblock_stat *stat);

static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
        struct bigblock_stat *stat);

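/*
 * Add a volume to an existing HAMMER filesystem.  This is the kernel
 * backend of the volume-add ioctl (issued by the hammer(8) utility's
 * "volume-add" command): it formats the new device's volume header and
 * freemap, installs the volume, and updates the volume count and the
 * bigblock statistics of the filesystem.
 */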
int
hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_volume *ioc)
{
        struct hammer_mount *hmp = trans->hmp;
        struct mount *mp = hmp->mp;
        hammer_volume_t volume;
        int error;

        if (mp->mnt_flag & MNT_RDONLY) {
                kprintf("Cannot add volume to read-only HAMMER filesystem\n");
                return (EINVAL);
        }

        if (hmp->nvolumes + 1 >= HAMMER_MAX_VOLUMES) {
                kprintf("Max number of HAMMER volumes exceeded\n");
                return (EINVAL);
        }

        if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
                kprintf("Another volume operation is in progress!\n");
                return (EAGAIN);
        }

        /*
         * Find an unused volume number.
         */
        int free_vol_no = 0;
        while (free_vol_no < HAMMER_MAX_VOLUMES &&
               RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
                ++free_vol_no;
        }
        if (free_vol_no >= HAMMER_MAX_VOLUMES) {
                kprintf("Max number of HAMMER volumes exceeded\n");
                hammer_unlock(&hmp->volume_lock);
                return (EINVAL);
        }

        struct vnode *devvp = NULL;
        error = hammer_setup_device(&devvp, ioc->device_name, 0);
        if (error)
                goto end;
        KKASSERT(devvp);
        error = hammer_format_volume_header(
                hmp,
                devvp,
                hmp->rootvol->ondisk->vol_name,
                free_vol_no,
                hmp->nvolumes+1,
                ioc->vol_size,
                ioc->boot_area_size,
                ioc->mem_area_size);
        hammer_close_device(&devvp, 0);
        if (error)
                goto end;

        error = hammer_install_volume(hmp, ioc->device_name, NULL);
        if (error)
                goto end;

        hammer_sync_lock_sh(trans);
        hammer_lock_ex(&hmp->blkmap_lock);

        ++hmp->nvolumes;

        /*
         * Set each volume's new value of the vol_count field.
         */
        for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
                volume = hammer_get_volume(hmp, vol_no, &error);
                if (volume == NULL && error == ENOENT) {
                        /*
                         * Skip unused volume numbers
                         */
                        error = 0;
                        continue;
                }
                KKASSERT(volume != NULL && error == 0);
                hammer_modify_volume_field(trans, volume, vol_count);
                volume->ondisk->vol_count = hmp->nvolumes;
                hammer_modify_volume_done(volume);

                /*
                 * Only changes to the header of the root volume
                 * are automatically flushed to disk.  For all
                 * other volumes that we modify we do it here.
                 *
                 * No interlock is needed, volume buffers are not
                 * messed with by bioops.
                 */
                if (volume != trans->rootvol && volume->io.modified) {
                        hammer_crc_set_volume(volume->ondisk);
                        hammer_io_flush(&volume->io, 0);
                }

                hammer_rel_volume(volume, 0);
        }

        volume = hammer_get_volume(hmp, free_vol_no, &error);
        KKASSERT(volume != NULL && error == 0);

        struct bigblock_stat stat;
        error = hammer_format_freemap(trans, volume, &stat);
        KKASSERT(error == 0);

        /*
         * Increase the total number of bigblocks
         */
        hammer_modify_volume_field(trans, trans->rootvol,
                vol0_stat_bigblocks);
        trans->rootvol->ondisk->vol0_stat_bigblocks += stat.total_bigblocks;
        hammer_modify_volume_done(trans->rootvol);

        /*
         * Increase the number of free bigblocks
         * (including the copy in hmp)
         */
        hammer_modify_volume_field(trans, trans->rootvol,
                vol0_stat_freebigblocks);
        trans->rootvol->ondisk->vol0_stat_freebigblocks += stat.total_free_bigblocks;
        hmp->copy_stat_freebigblocks =
                trans->rootvol->ondisk->vol0_stat_freebigblocks;
        hammer_modify_volume_done(trans->rootvol);

        hammer_rel_volume(volume, 0);

        hammer_unlock(&hmp->blkmap_lock);
        hammer_sync_unlock(trans);

        KKASSERT(error == 0);
end:
        hammer_unlock(&hmp->volume_lock);
        if (error)
                kprintf("An error occurred: %d\n", error);
        return (error);
}

/*
 * Remove a volume.
 */
int
hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
                struct hammer_ioc_volume *ioc)
{
        struct hammer_mount *hmp = trans->hmp;
        struct mount *mp = hmp->mp;
        hammer_volume_t volume;
        int error = 0;

        if (mp->mnt_flag & MNT_RDONLY) {
                kprintf("Cannot delete volume from read-only HAMMER filesystem\n");
                return (EINVAL);
        }

        if (hammer_lock_ex_try(&hmp->volume_lock) != 0) {
                kprintf("Another volume operation is in progress!\n");
                return (EAGAIN);
        }

        volume = NULL;

        /*
         * Find the volume by name.
         */
        for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
                volume = hammer_get_volume(hmp, vol_no, &error);
                if (volume == NULL && error == ENOENT) {
                        /*
                         * Skip unused volume numbers
                         */
                        error = 0;
                        continue;
                }
                KKASSERT(volume != NULL && error == 0);
                if (strcmp(volume->vol_name, ioc->device_name) == 0) {
                        break;
                }
                hammer_rel_volume(volume, 0);
                volume = NULL;
        }

        if (volume == NULL) {
                kprintf("Couldn't find volume\n");
                error = EINVAL;
                goto end;
        }

        if (volume == trans->rootvol) {
                kprintf("Cannot remove the root volume\n");
                hammer_rel_volume(volume, 0);
                error = EINVAL;
                goto end;
        }

        /*
         * Mark the volume for removal.  The reblocker below migrates all
         * of its data onto the remaining volumes.
         */

        hmp->volume_to_remove = volume->vol_no;

        struct hammer_ioc_reblock reblock;
        bzero(&reblock, sizeof(reblock));

        reblock.key_beg.localization = HAMMER_MIN_LOCALIZATION;
        reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
        reblock.key_end.localization = HAMMER_MAX_LOCALIZATION;
        reblock.key_end.obj_id = HAMMER_MAX_OBJID;
        reblock.head.flags = HAMMER_IOC_DO_FLAGS;
        reblock.free_level = 0;

        error = hammer_ioc_reblock(trans, ip, &reblock);

        if (reblock.head.flags & HAMMER_IOC_HEAD_INTR) {
                error = EINTR;
        }

        if (error) {
                if (error == EINTR) {
                        kprintf("reblock was interrupted\n");
                } else {
                        kprintf("reblock failed: %d\n", error);
                }
                hmp->volume_to_remove = -1;
                hammer_rel_volume(volume, 0);
                goto end;
        }

        /*
         * Sync filesystem
         */
        int count = 0;
        while (hammer_flusher_haswork(hmp)) {
                hammer_flusher_sync(hmp);
                ++count;
                if (count >= 5) {
                        if (count == 5)
                                kprintf("HAMMER: flushing.");
                        else
                                kprintf(".");
                        tsleep(&count, 0, "hmrufl", hz);
                }
                if (count == 30) {
                        kprintf("giving up");
                        break;
                }
        }
        kprintf("\n");

        hammer_sync_lock_sh(trans);
        hammer_lock_ex(&hmp->blkmap_lock);

        /*
         * We use stat later to update rootvol's bigblock stats
         */
        struct bigblock_stat stat;
        error = hammer_free_freemap(trans, volume, &stat);
        if (error) {
                kprintf("Failed to free volume. Volume not empty!\n");
                hmp->volume_to_remove = -1;
                hammer_rel_volume(volume, 0);
                hammer_unlock(&hmp->blkmap_lock);
                hammer_sync_unlock(trans);
                goto end;
        }

        hmp->volume_to_remove = -1;

        hammer_rel_volume(volume, 0);

        /*
         * Unload buffers
         */
        RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
                hammer_unload_buffer, volume);

        error = hammer_unload_volume(volume, NULL);
        if (error == -1) {
                kprintf("Failed to unload volume\n");
                hammer_unlock(&hmp->blkmap_lock);
                hammer_sync_unlock(trans);
                goto end;
        }

        volume = NULL;
        --hmp->nvolumes;

        /*
         * Set each volume's new value of the vol_count field.
         */
        for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
                volume = hammer_get_volume(hmp, vol_no, &error);
                if (volume == NULL && error == ENOENT) {
                        /*
                         * Skip unused volume numbers
                         */
                        error = 0;
                        continue;
                }

                KKASSERT(volume != NULL && error == 0);
                hammer_modify_volume_field(trans, volume, vol_count);
                volume->ondisk->vol_count = hmp->nvolumes;
                hammer_modify_volume_done(volume);

                /*
                 * Only changes to the header of the root volume
                 * are automatically flushed to disk.  For all
                 * other volumes that we modify we do it here.
                 *
                 * No interlock is needed, volume buffers are not
                 * messed with by bioops.
                 */
                if (volume != trans->rootvol && volume->io.modified) {
                        hammer_crc_set_volume(volume->ondisk);
                        hammer_io_flush(&volume->io, 0);
                }

                hammer_rel_volume(volume, 0);
        }

        /*
         * Update the total number of bigblocks
         */
        hammer_modify_volume_field(trans, trans->rootvol,
                vol0_stat_bigblocks);
        trans->rootvol->ondisk->vol0_stat_bigblocks -= stat.total_bigblocks;
        hammer_modify_volume_done(trans->rootvol);

        /*
         * Update the number of free bigblocks
         * (including the copy in hmp)
         */
        hammer_modify_volume_field(trans, trans->rootvol,
                vol0_stat_freebigblocks);
        trans->rootvol->ondisk->vol0_stat_freebigblocks -= stat.total_free_bigblocks;
        hmp->copy_stat_freebigblocks =
                trans->rootvol->ondisk->vol0_stat_freebigblocks;
        hammer_modify_volume_done(trans->rootvol);

        hammer_unlock(&hmp->blkmap_lock);
        hammer_sync_unlock(trans);

        /*
         * Erase the volume header of the removed device so that it
         * cannot accidentally be mounted again.
         */
        struct vnode *devvp = NULL;
        error = hammer_setup_device(&devvp, ioc->device_name, 0);
        if (error) {
                kprintf("Failed to open device: %s\n", ioc->device_name);
                goto end;
        }
        KKASSERT(devvp);
        error = hammer_clear_volume_header(devvp);
        if (error) {
                kprintf("Failed to clear volume header of device: %s\n",
                        ioc->device_name);
                goto end;
        }
        hammer_close_device(&devvp, 0);

        KKASSERT(error == 0);
end:
        hammer_unlock(&hmp->volume_lock);
        return (error);
}

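/*
 * List the volumes that make up the filesystem: the name of each mounted
 * volume is copied out to the userland buffer supplied with the ioctl.
 * This backs the volume-list operation of the hammer(8) utility.
 */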
int
hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
        struct hammer_ioc_volume_list *ioc)
{
        struct hammer_mount *hmp = trans->hmp;
        hammer_volume_t volume;
        int error = 0;
        int i, cnt, len;

        for (i = 0, cnt = 0; i < HAMMER_MAX_VOLUMES && cnt < ioc->nvols; i++) {
                volume = hammer_get_volume(hmp, i, &error);
                if (volume == NULL && error == ENOENT) {
                        error = 0;
                        continue;
                }
                KKASSERT(volume != NULL && error == 0);

                len = strlen(volume->vol_name) + 1;
                KKASSERT(len <= MAXPATHLEN);

                error = copyout(volume->vol_name, ioc->vols[cnt].device_name,
                                len);
                if (error) {
                        hammer_rel_volume(volume, 0);
                        return (error);
                }
                cnt++;
                hammer_rel_volume(volume, 0);
        }
        ioc->nvols = cnt;

        return (error);
}

/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
        int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
                struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
                hammer_off_t, hammer_off_t, void*),
        void *data)
{
        struct hammer_mount *hmp = trans->hmp;
        hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
        hammer_buffer_t buffer = NULL;
        int error = 0;

        hammer_off_t phys_off;
        hammer_off_t block_off;
        hammer_off_t layer1_off;
        hammer_off_t layer2_off;
        hammer_off_t aligned_buf_end_off;
        struct hammer_blockmap_layer1 *layer1;
        struct hammer_blockmap_layer2 *layer2;

        /*
         * Calculate the usable size of the volume, which
         * must be aligned at a bigblock (8 MB) boundary.
         */
        aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
                (volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
                & ~HAMMER_LARGEBLOCK_MASK64));

        /*
         * Iterate the volume's address space in chunks of 4 TB, where each
         * chunk consists of at least one physically available 8 MB bigblock.
         *
         * For each chunk we need one L1 entry and one L2 bigblock.
         * We use the first bigblock of each chunk as L2 block.
         */
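        /*
         * The arithmetic behind this layout (assuming the usual 8 MB
         * bigblocks and 16-byte layer2 entries): one 4 TB chunk covers
         * 4 TB / 8 MB = 524288 bigblocks, whose layer2 entries occupy
         * 524288 * 16 bytes = 8 MB, i.e. exactly one bigblock.  That is
         * why a single bigblock per chunk suffices to hold its L2 array.
         */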
        for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
             phys_off < aligned_buf_end_off;
             phys_off += HAMMER_BLOCKMAP_LAYER2) {
                for (block_off = 0;
                     block_off < HAMMER_BLOCKMAP_LAYER2;
                     block_off += HAMMER_LARGEBLOCK_SIZE) {
                        layer2_off = phys_off +
                                HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
                        layer2 = hammer_bread(hmp, layer2_off, &error, &buffer);
                        if (error)
                                goto end;

                        error = callback(trans, volume, &buffer, NULL,
                                layer2, phys_off, block_off, data);
                        if (error)
                                goto end;
                }

                layer1_off = freemap->phys_offset +
                        HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
                layer1 = hammer_bread(hmp, layer1_off, &error, &buffer);
                if (error)
                        goto end;

                error = callback(trans, volume, &buffer, layer1, NULL,
                        phys_off, 0, data);
                if (error)
                        goto end;
        }

end:
        if (buffer) {
                hammer_rel_buffer(buffer, 0);
                buffer = NULL;
        }

        return error;
}

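/*
 * Callback for hammer_iterate_l1l2_entries() used when formatting the
 * freemap of a newly added volume.  It is invoked once per layer2 entry
 * (layer2 != NULL) and once per layer1 entry (layer1 != NULL), initializes
 * the entry accordingly, and accumulates the bigblock counts in the
 * bigblock_stat passed via 'data'.
 */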
static int
format_callback(hammer_transaction_t trans, hammer_volume_t volume,
        hammer_buffer_t *bufferp,
        struct hammer_blockmap_layer1 *layer1,
        struct hammer_blockmap_layer2 *layer2,
        hammer_off_t phys_off,
        hammer_off_t block_off,
        void *data)
{
        struct bigblock_stat *stat = (struct bigblock_stat*)data;

        /*
         * Calculate the usable size of the volume, which must be aligned
         * at a bigblock (8 MB) boundary.
         */
        hammer_off_t aligned_buf_end_off;
        aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
                (volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
                & ~HAMMER_LARGEBLOCK_MASK64));

        if (layer1) {
                KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

                hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = phys_off;
                layer1->blocks_free = stat->counter;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(*bufferp);

                stat->total_free_bigblocks += stat->counter;
                stat->counter = 0; /* reset */
        } else if (layer2) {
                hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
                bzero(layer2, sizeof(*layer2));

                if (block_off == 0) {
                        /*
                         * The first entry represents the L2 bigblock itself.
                         */
                        layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
                        layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                        layer2->bytes_free = 0;
                        ++stat->total_bigblocks;
                } else if (phys_off + block_off < aligned_buf_end_off) {
                        /*
                         * Available bigblock
                         */
                        layer2->zone = 0;
                        layer2->append_off = 0;
                        layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
                        ++stat->total_bigblocks;
                        ++stat->counter;
                } else {
                        /*
                         * Bigblock outside of physically available
                         * space
                         */
                        layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
                        layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
                        layer2->bytes_free = 0;
                }

                layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
                hammer_modify_buffer_done(*bufferp);
        } else {
                KKASSERT(0);
        }

        return 0;
}

static int
hammer_format_freemap(hammer_transaction_t trans, hammer_volume_t volume,
        struct bigblock_stat *stat)
{
        stat->total_bigblocks = 0;
        stat->total_free_bigblocks = 0;
        stat->counter = 0;
        return hammer_iterate_l1l2_entries(trans, volume, format_callback, stat);
}

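/*
 * Callback for hammer_iterate_l1l2_entries() used when removing a volume.
 * It frees the layer1/layer2 freemap entries belonging to the volume being
 * removed and returns EBUSY as soon as it finds a bigblock that is still
 * in use.  A NULL 'data' pointer selects a test-only pass that modifies
 * nothing on disk.
 */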
static int
free_callback(hammer_transaction_t trans, hammer_volume_t volume __unused,
        hammer_buffer_t *bufferp,
        struct hammer_blockmap_layer1 *layer1,
        struct hammer_blockmap_layer2 *layer2,
        hammer_off_t phys_off,
        hammer_off_t block_off __unused,
        void *data)
{
        struct bigblock_stat *stat = (struct bigblock_stat*)data;

        /*
         * No modifications to ondisk structures
         */
        int testonly = (stat == NULL);

        if (layer1) {
                if (layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL) {
                        /*
                         * This layer1 entry is already free.
                         */
                        return 0;
                }

                KKASSERT((int)HAMMER_VOL_DECODE(layer1->phys_offset) ==
                        trans->hmp->volume_to_remove);

                if (testonly)
                        return 0;

                /*
                 * Free the L1 entry
                 */
                hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
                bzero(layer1, sizeof(*layer1));
                layer1->phys_offset = HAMMER_BLOCKMAP_UNAVAIL;
                layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
                hammer_modify_buffer_done(*bufferp);

                return 0;
        } else if (layer2) {
                if (layer2->zone == HAMMER_ZONE_UNAVAIL_INDEX) {
                        return 0;
                }

                if (layer2->zone == HAMMER_ZONE_FREEMAP_INDEX) {
                        if (stat) {
                                ++stat->total_bigblocks;
                        }
                        return 0;
                }

                if (layer2->append_off == 0 &&
                    layer2->bytes_free == HAMMER_LARGEBLOCK_SIZE) {
                        if (stat) {
                                ++stat->total_bigblocks;
                                ++stat->total_free_bigblocks;
                        }
                        return 0;
                }

                /*
                 * We found a layer2 entry that is not empty!
                 */
                return EBUSY;
        } else {
                KKASSERT(0);
        }

        return EINVAL;
}

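/*
 * Free the freemap entries belonging to a volume that is about to be
 * removed.  Runs free_callback in two passes: a test-only pass (data ==
 * NULL) that fails with EBUSY if the volume still holds allocated
 * bigblocks, followed by the real pass that frees the entries and fills
 * in 'stat' for the caller's bookkeeping.
 */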
static int
hammer_free_freemap(hammer_transaction_t trans, hammer_volume_t volume,
        struct bigblock_stat *stat)
{
        int error;

        stat->total_bigblocks = 0;
        stat->total_free_bigblocks = 0;
        stat->counter = 0;

        error = hammer_iterate_l1l2_entries(trans, volume, free_callback, NULL);
        if (error)
                return error;

        error = hammer_iterate_l1l2_entries(trans, volume, free_callback, stat);
        return error;
}

/************************************************************************
 *                              MISC                                    *
 ************************************************************************
 */

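/*
 * Look up the given device path, verify that it refers to an unmounted,
 * otherwise unused disk device, and open it (read-only if 'ronly' is set).
 * On success *devvpp holds a referenced vnode for the device.
 */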
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
        int error;
        struct nlookupdata nd;

        /*
         * Get the device vnode
         */
        if (*devvpp == NULL) {
                error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
                nlookup_done(&nd);
        } else {
                error = 0;
        }

        if (error == 0) {
                if (vn_isdisk(*devvpp, &error)) {
                        error = vfs_mountedon(*devvpp);
                }
        }
        if (error == 0 && vcount(*devvpp) > 0)
                error = EBUSY;
        if (error == 0) {
                vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(*devvpp,
                                (ronly ? FREAD : FREAD|FWRITE),
                                FSCRED, NULL);
                }
                vn_unlock(*devvpp);
        }
        if (error && *devvpp) {
                vrele(*devvpp);
                *devvpp = NULL;
        }
        return (error);
}

static void
hammer_close_device(struct vnode **devvpp, int ronly)
{
        if (*devvpp) {
                vinvalbuf(*devvpp, ronly ? 0 : V_SAVE, 0, 0);
                VOP_CLOSE(*devvpp, (ronly ? FREAD : FREAD|FWRITE));
                vrele(*devvpp);
                *devvpp = NULL;
        }
}

static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
        const char *vol_name, int vol_no, int vol_count,
        int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
        struct buf *bp = NULL;
        struct hammer_volume_ondisk *ondisk;
        int error;

        /*
         * Read the existing volume header and do various sanity checks
         * before overwriting it.
         */
        KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
        error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
                goto late_failure;

        ondisk = (struct hammer_volume_ondisk*) bp->b_data;

        /*
         * Note that we do NOT allow a device that already contains a
         * valid HAMMER signature to be reused.  It has to be erased
         * with dd first.
         */
        if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_volume_add: Formatting of valid HAMMER volume "
                        "%s denied. Erase with dd!\n", vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        bzero(ondisk, sizeof(struct hammer_volume_ondisk));
        ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
        ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
        ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
        ondisk->vol_fsid = hmp->fsid;
        ondisk->vol_rootvol = hmp->rootvol->vol_no;
        ondisk->vol_no = vol_no;
        ondisk->vol_count = vol_count;
        ondisk->vol_version = hmp->version;

        /*
         * Reserve space for (future) header junk, setup our poor-man's
         * bigblock allocator.
         */
        int64_t vol_alloc = HAMMER_BUFSIZE * 16;

        ondisk->vol_bot_beg = vol_alloc;
        vol_alloc += boot_area_size;
        ondisk->vol_mem_beg = vol_alloc;
        vol_alloc += mem_area_size;

        /*
         * The remaining area is the zone 2 buffer allocation area.
         * These buffers are HAMMER_BUFSIZE (16K) blocks addressed by
         * zone-2 (raw buffer) offsets.
         */
        ondisk->vol_buf_beg = vol_alloc;
        ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

        if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
                kprintf("volume %d %s is too small to hold the volume header\n",
                        ondisk->vol_no, ondisk->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
                HAMMER_BUFSIZE;
        ondisk->vol_blocksize = HAMMER_BUFSIZE;

        /*
         * Write volume header to disk
         */
        error = bwrite(bp);
        bp = NULL;

late_failure:
        if (bp)
                brelse(bp);
        return (error);
}

/*
 * Invalidates the volume header.  Used by volume-del.
 */
static int
hammer_clear_volume_header(struct vnode *devvp)
{
        struct buf *bp = NULL;
        struct hammer_volume_ondisk *ondisk;
        int error;

        KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
        error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
                goto late_failure;

        ondisk = (struct hammer_volume_ondisk*) bp->b_data;
        bzero(ondisk, sizeof(struct hammer_volume_ondisk));

        error = bwrite(bp);
        bp = NULL;

late_failure:
        if (bp)
                brelse(bp);
        return (error);
}