2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com> and
6 * Michael Neumann <mneumann@ntecs.de>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 #include <sys/fcntl.h>
39 #include <sys/nlookup.h>
/*
 * Forward declaration: format the raw device at dev_path with a new
 * HAMMER volume header.  Reports the number of layer1 entries the new
 * volume needs via *num_layer1_entries_p and the per-layer1-entry free
 * bigblock counts via the caller-supplied layer1_free_blocks array.
 */
43 hammer_format_volume_header(struct hammer_mount *hmp, const char *dev_path,
44 const char *vol_name, int vol_no, int vol_count,
45 int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size,
46 uint64_t *num_layer1_entries_p, uint64_t *layer1_free_blocks);
/*
 * Expand a mounted HAMMER filesystem (ioctl backend) by formatting the
 * device named in *expand as an additional volume and installing it
 * into the mount.
 *
 * Flow: reject read-only mounts and over-limit volume counts, pick the
 * first unused volume number, format the new device's volume header,
 * install the volume, update vol_count on every existing volume, then
 * wire the new volume's layer1 freemap entries into the root volume's
 * freemap while folding the new free bigblocks into the statistics.
 * Metadata updates run under the shared sync lock and the blockmap lock.
 */
49 hammer_ioc_expand(hammer_transaction_t trans, hammer_inode_t ip,
50 struct hammer_ioc_expand *expand)
52 struct hammer_mount *hmp = trans->hmp;
53 struct mount *mp = hmp->mp;
	/* Expansion writes metadata; refuse on a read-only mount. */
56 if (mp->mnt_flag & MNT_RDONLY) {
57 kprintf("Cannot expand read-only HAMMER filesystem\n");
	/*
	 * NOTE(review): ">=" keeps the volume count strictly below
	 * HAMMER_MAX_VOLUMES even after the addition -- confirm this
	 * off-by-one is intentional before relying on the limit.
	 */
61 if (hmp->nvolumes + 1 >= HAMMER_MAX_VOLUMES) {
62 kprintf("Max number of HAMMER volumes exceeded\n");
	/* Probe the volume RB-tree for the first unused volume number. */
67 * Find an unused volume number.
70 while (free_vol_no < HAMMER_MAX_VOLUMES &&
71 RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
74 if (free_vol_no >= HAMMER_MAX_VOLUMES) {
75 kprintf("Max number of HAMMER volumes exceeded\n");
	/*
	 * Scratch array of per-layer1-entry free-bigblock counts, filled
	 * in by hammer_format_volume_header().  The 1024 bounds the
	 * number of layer1 entries one volume may need (asserted below).
	 */
79 uint64_t num_layer1_entries = 0;
80 uint64_t *layer1_free_blocks =
81 kmalloc(1024 * sizeof(uint64_t), M_TEMP, M_WAITOK|M_ZERO);
	/* Write a fresh volume header onto the new device. */
83 error = hammer_format_volume_header(
86 hmp->rootvol->ondisk->vol_name,
90 expand->boot_area_size,
91 expand->mem_area_size,
92 &num_layer1_entries /* out param */,
94 KKASSERT(num_layer1_entries < 1024);
	/* Bring the freshly formatted volume into the mount. */
98 error = hammer_install_volume(hmp, expand->device_name, NULL);
104 hammer_sync_lock_sh(trans);
105 hammer_lock_ex(&hmp->blkmap_lock);
	/*
	 * Update vol_count on every existing volume so all on-disk
	 * headers agree on the new number of volumes.  Unused volume
	 * numbers (ENOENT) are skipped.
	 */
108 * Set each volumes new value of the vol_count field.
110 for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
111 hammer_volume_t volume;
112 volume = hammer_get_volume(hmp, vol_no, &error);
113 if (volume == NULL && error == ENOENT) {
115 * Skip unused volume numbers
120 KKASSERT(error == 0);
121 hammer_modify_volume_field(trans, volume, vol_count);
122 volume->ondisk->vol_count = hmp->nvolumes;
123 hammer_modify_volume_done(volume);
124 hammer_rel_volume(volume, 0);
	/*
	 * Hook the new volume's space into the freemap: one layer1 entry
	 * per covered chunk, each pointing at a layer2 bigblock located
	 * on the new volume itself.
	 */
128 * Assign Layer1 entries
131 hammer_volume_t root_volume = NULL;
132 hammer_blockmap_t freemap;
134 freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
135 root_volume = hammer_get_root_volume(hmp, &error);
136 KKASSERT(root_volume && error == 0);
138 for (uint64_t i_layer1 = 0; i_layer1 < num_layer1_entries; i_layer1++) {
139 hammer_buffer_t buffer1 = NULL;
140 struct hammer_blockmap_layer1 *layer1;
141 hammer_off_t layer1_offset;
		/*
		 * Locate this volume's layer1 slot inside the freemap.
		 * NOTE(review): "+ i_layer1" is added as a raw byte
		 * offset, not scaled by sizeof(layer1) -- looks like a
		 * precedence slip; confirm against the original layout.
		 */
143 layer1_offset = freemap->phys_offset +
144 (free_vol_no * 1024L) *
145 sizeof(struct hammer_blockmap_layer1) + i_layer1;
147 layer1 = hammer_bread(hmp, layer1_offset, &error, &buffer1);
148 KKASSERT(layer1 != NULL && error == 0);
149 KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);
		/* Point the layer1 entry at its layer2 bigblock and CRC it. */
151 hammer_modify_buffer(trans, buffer1, layer1, sizeof(*layer1));
152 bzero(layer1, sizeof(*layer1));
153 layer1->phys_offset = HAMMER_ENCODE_RAW_BUFFER(free_vol_no,
154 i_layer1 * HAMMER_LARGEBLOCK_SIZE);
156 layer1->blocks_free = layer1_free_blocks[i_layer1];
157 layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);
159 hammer_modify_buffer_done(buffer1);
161 hammer_rel_buffer(buffer1, 0);
		/* Fold the new free bigblocks into the global statistics. */
163 hammer_modify_volume_field(trans, root_volume,
164 vol0_stat_freebigblocks);
166 root_volume->ondisk->vol0_stat_freebigblocks +=
167 layer1_free_blocks[i_layer1];
168 hmp->copy_stat_freebigblocks =
169 root_volume->ondisk->vol0_stat_freebigblocks;
170 hammer_modify_volume_done(root_volume);
173 hammer_rel_volume(root_volume, 0);
175 hammer_unlock(&hmp->blkmap_lock);
176 hammer_sync_unlock(trans);
	/* Error path: log the failure code (spelling fixed: "occurred"). */
180 kprintf("An error occurred: %d\n", error);
182 if (layer1_free_blocks)
183 kfree(layer1_free_blocks, M_TEMP);
/*
 * Format the device at dev_path as HAMMER volume vol_no of vol_count:
 * write a fresh volume header and initialize the volume's on-disk
 * layer2 freemap.
 *
 * Out parameters:
 *	*num_layer1_entries_p - number of layer1 entries this volume needs
 *	layer1_free_blocks[i] - free bigblocks accounted to layer1 entry i
 *
 * Refuses devices that already carry a valid HAMMER signature (must be
 * erased first) or that are mounted/open elsewhere.
 */
188 hammer_format_volume_header(struct hammer_mount *hmp, const char *dev_path,
189 const char *vol_name, int vol_no, int vol_count,
190 int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size,
191 uint64_t *num_layer1_entries_p, uint64_t *layer1_free_blocks)
193 struct vnode *devvp = NULL;
194 struct buf *bp = NULL;
195 struct nlookupdata nd;
196 struct hammer_volume_ondisk *ondisk;
	/* Resolve dev_path to its vnode via a kernel-space name lookup. */
200 * Get the device vnode
202 error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
204 error = nlookup(&nd);
206 error = cache_vref(&nd.nl_nch, nd.nl_cred, &devvp);
	/* Target must be a disk device not already mounted or held open. */
210 if (vn_isdisk(devvp, &error)) {
211 error = vfs_mountedon(devvp);
215 count_udev(devvp->v_umajor, devvp->v_uminor) > 0) {
	/* Flush cached buffers and open the device for read/write. */
219 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
220 error = vinvalbuf(devvp, V_SAVE, 0, 0);
222 error = VOP_OPEN(devvp, FREAD|FWRITE, FSCRED, NULL);
	/* Read the first buffer so any pre-existing header can be checked. */
233 * Extract the volume number from the volume header and do various
236 KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
237 error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
238 if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
241 ondisk = (struct hammer_volume_ondisk*) bp->b_data;
244 * Note that we do NOT allow to use a device that contains
245 * a valid HAMMER signature. It has to be cleaned up with dd
248 if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
249 kprintf("hammer_expand: Formatting of valid HAMMER volume "
250 "%s denied. Erase with dd!\n", vol_name);
	/*
	 * Build the new header in the buffer: identity fields (fstype,
	 * fsid, root volume number, version) are inherited from the
	 * existing filesystem's root volume.
	 */
255 bzero(ondisk, sizeof(struct hammer_volume_ondisk));
256 ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
257 ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
258 ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
259 ondisk->vol_fsid = hmp->fsid;
260 ondisk->vol_rootvol = hmp->rootvol->vol_no;
261 ondisk->vol_no = vol_no;
262 ondisk->vol_count = vol_count;
263 ondisk->vol_version = hmp->version;
266 * Reserve space for (future) header junk, setup our poor-man's
267 * bigblock allocator.
269 int64_t vol_alloc = HAMMER_BUFSIZE * 16;
	/* Lay out boot and memory-log areas after the header reservation. */
271 ondisk->vol_bot_beg = vol_alloc;
272 vol_alloc += boot_area_size;
273 ondisk->vol_mem_beg = vol_alloc;
274 vol_alloc += mem_area_size;
277 * The remaining area is the zone 2 buffer allocation area. These
280 ondisk->vol_buf_beg = vol_alloc;
281 ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;
	/* Sanity check: the buffer area must not be negative-sized. */
283 if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
284 kprintf("volume %d %s is too small to hold the volume header",
285 ondisk->vol_no, ondisk->vol_name);
290 ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
292 ondisk->vol_blocksize = HAMMER_BUFSIZE;
295 * Write volume header to disk
301 * Initialize layer2 freemap
305 * Determine the number of L1 entries we need to represent the
306 * space of the whole volume. Each L1 entry covers 4 TB of space
307 * (8MB * 2**19) and we need one L2 big block for each L1 entry.
308 * L1 entries are stored in the root volume.
310 hammer_off_t off_end = (ondisk->vol_buf_end - ondisk->vol_buf_beg)
311 & ~HAMMER_LARGEBLOCK_MASK64;
312 uint64_t num_layer1_entries = (off_end / HAMMER_BLOCKMAP_LAYER2) +
313 ((off_end & HAMMER_BLOCKMAP_LAYER2_MASK) == 0 ? 0 : 1);
314 *num_layer1_entries_p = num_layer1_entries;
317 * We allocate all L2 big blocks sequentially from the start of
320 KKASSERT(off_end / HAMMER_LARGEBLOCK_SIZE >= num_layer1_entries);
	/* Walk the layer2 region buffer by buffer, entry by entry. */
322 hammer_off_t layer2_end = num_layer1_entries * HAMMER_LARGEBLOCK_SIZE;
323 hammer_off_t off = 0;
324 while (off < layer2_end) {
325 error = bread(devvp, ondisk->vol_buf_beg + off,
326 HAMMER_BUFSIZE, &bp);
327 if (error || bp->b_bcount != HAMMER_BUFSIZE)
329 struct hammer_blockmap_layer2 *layer2 = (void*)bp->b_data;
331 for (int i = 0; i < HAMMER_BUFSIZE / sizeof(*layer2); ++i) {
333 /* the bigblock described by the layer2 entry */
334 hammer_off_t bigblock_off = HAMMER_LARGEBLOCK_SIZE *
335 (off / sizeof(*layer2));
338 * To which layer1 entry does the current layer2
341 * We need this to calculate the free bigblocks
342 * which is required for the layer1.
344 uint64_t i_layer1 = HAMMER_BLOCKMAP_LAYER1_OFFSET(off) /
345 sizeof(struct hammer_blockmap_layer1);
346 KKASSERT(i_layer1 < 1024);
348 bzero(layer2, sizeof(*layer2));
			/*
			 * NOTE(review): "(off & HAMMER_LARGEBLOCK_SIZE) ==
			 * bigblock_off" looks wrong as a "this bigblock
			 * holds layer2 metadata" test; something like
			 * "bigblock_off < layer2_end" seems intended.
			 * Confirm against the original before changing.
			 */
350 if ((off & HAMMER_LARGEBLOCK_SIZE) == bigblock_off) {
352 * Bigblock is part of the layer2 freemap
354 layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
355 layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
356 layer2->bytes_free = 0;
			/* Usable bigblock: count it free for its layer1 entry. */
357 } else if (bigblock_off < off_end) {
359 layer2->append_off = 0;
360 layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
361 ++layer1_free_blocks[i_layer1];
			/* Past the usable area: mark the bigblock unavailable. */
363 layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
364 layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
365 layer2->bytes_free = 0;
367 layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);
368 off += sizeof(*layer2);
	/* Done with the device. */
381 VOP_CLOSE(devvp, FREAD|FWRITE);