/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>

static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly);

static void
hammer_close_device(struct vnode **devvpp, int ronly);

static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size);

static int
hammer_format_freemap(struct hammer_mount *hmp,
	hammer_transaction_t trans,
	hammer_volume_t volume,
	hammer_volume_t root_volume);

static uint64_t
hammer_format_layer2_chunk(struct hammer_mount *hmp,
	hammer_transaction_t trans,
	hammer_off_t phys_offset,
	hammer_off_t aligned_buf_end_off,
	hammer_buffer_t *bufferp,
	int *errorp);

static void
hammer_set_layer1_entry(struct hammer_mount *hmp,
	hammer_transaction_t trans,
	hammer_off_t phys_offset,
	uint64_t free_bigblocks,
	hammer_blockmap_t freemap,
	hammer_buffer_t *bufferp,
	int *errorp);

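/*
 * Expand a mounted HAMMER filesystem: format the device named in the
 * ioctl argument as an additional volume, install it, update the
 * vol_count field of every existing volume and format the new
 * volume's freemap.
 */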
int
hammer_ioc_expand(hammer_transaction_t trans, hammer_inode_t ip,
		struct hammer_ioc_expand *expand)
{
	struct hammer_mount *hmp = trans->hmp;
	struct mount *mp = hmp->mp;
	hammer_volume_t volume;
	hammer_volume_t root_volume;
	int error;

	if (mp->mnt_flag & MNT_RDONLY) {
		kprintf("Cannot expand read-only HAMMER filesystem\n");
		return (EINVAL);
	}

	if (hmp->nvolumes >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	/*
	 * Find an unused volume number.
	 */
	int free_vol_no = 0;
	while (free_vol_no < HAMMER_MAX_VOLUMES &&
	       RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, free_vol_no)) {
		++free_vol_no;
	}
	if (free_vol_no >= HAMMER_MAX_VOLUMES) {
		kprintf("Max number of HAMMER volumes exceeded\n");
		return (EINVAL);
	}

	struct vnode *devvp = NULL;
	error = hammer_setup_device(&devvp, expand->device_name, 0);
	if (error)
		goto end;
	KKASSERT(devvp);
	error = hammer_format_volume_header(
		hmp,
		devvp,
		hmp->rootvol->ondisk->vol_name,
		free_vol_no,
		hmp->nvolumes+1,
		expand->vol_size,
		expand->boot_area_size,
		expand->mem_area_size);
	hammer_close_device(&devvp, 0);
	if (error)
		goto end;

	error = hammer_install_volume(hmp, expand->device_name, NULL);
	if (error)
		goto end;

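	/*
	 * Hold the sync lock (shared) and the blockmap lock (exclusive)
	 * while we update the volume headers and format the freemap.
	 */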
	hammer_sync_lock_sh(trans);
	hammer_lock_ex(&hmp->blkmap_lock);

	++hmp->nvolumes;

	/*
	 * Set the vol_count field of each volume to the new total.
	 */
	for (int vol_no = 0; vol_no < HAMMER_MAX_VOLUMES; ++vol_no) {
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (volume == NULL && error == ENOENT) {
			/*
			 * Skip unused volume numbers
			 */
			error = 0;
			continue;
		}
		KKASSERT(error == 0);
		hammer_modify_volume_field(trans, volume, vol_count);
		volume->ondisk->vol_count = hmp->nvolumes;
		hammer_modify_volume_done(volume);
		hammer_rel_volume(volume, 0);
	}

	volume = hammer_get_volume(hmp, free_vol_no, &error);
	KKASSERT(volume != NULL && error == 0);

	root_volume = hammer_get_root_volume(hmp, &error);
	KKASSERT(root_volume && error == 0);

	error = hammer_format_freemap(hmp, trans, volume, root_volume);

	hammer_rel_volume(root_volume, 0);
	hammer_rel_volume(volume, 0);

	hammer_unlock(&hmp->blkmap_lock);
	hammer_sync_unlock(trans);

end:
	if (error)
		kprintf("hammer_ioc_expand: error %d\n", error);
	return (error);
}

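/*
 * Format the freemap covering the new volume and add the newly
 * available bigblocks to the root volume's statistics.
 */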
static int
hammer_format_freemap(struct hammer_mount *hmp,
	hammer_transaction_t trans,
	hammer_volume_t volume,
	hammer_volume_t root_volume)
{
	hammer_off_t phys_offset;
	hammer_buffer_t buffer = NULL;
	hammer_blockmap_t freemap;
	hammer_off_t aligned_buf_end_off;
	uint64_t free_bigblocks;
	int error = 0;

	/*
	 * Calculate the usable size of the new volume, which must be
	 * aligned to an 8 MB bigblock boundary.
	 */
	aligned_buf_end_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64);

	freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];

	/*
	 * Iterate over the volume's address space in chunks of 4 TB,
	 * each of which contains at least one physically available
	 * 8 MB bigblock.
	 *
	 * For each chunk we need one layer1 entry and one layer2
	 * bigblock.  We use the first bigblock of each chunk as its
	 * layer2 block.
	 */
	for (phys_offset = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_offset < aligned_buf_end_off;
	     phys_offset += HAMMER_BLOCKMAP_LAYER2) {

		free_bigblocks = hammer_format_layer2_chunk(hmp, trans,
			phys_offset, aligned_buf_end_off, &buffer, &error);
		KKASSERT(error == 0);

		hammer_set_layer1_entry(hmp, trans, phys_offset,
			free_bigblocks, freemap, &buffer, &error);
		KKASSERT(error == 0);

		/*
		 * Increase the total number of bigblocks
		 */
		hammer_modify_volume_field(trans, root_volume,
			vol0_stat_bigblocks);
		root_volume->ondisk->vol0_stat_bigblocks += free_bigblocks;
		hammer_modify_volume_done(root_volume);

		/*
		 * Increase the number of free bigblocks
		 * (including the copy in hmp)
		 */
		hammer_modify_volume_field(trans, root_volume,
			vol0_stat_freebigblocks);
		root_volume->ondisk->vol0_stat_freebigblocks += free_bigblocks;
		hmp->copy_stat_freebigblocks =
			root_volume->ondisk->vol0_stat_freebigblocks;
		hammer_modify_volume_done(root_volume);
	}

	if (buffer) {
		hammer_rel_buffer(buffer, 0);
		buffer = NULL;
	}

	return (error);
}

/*
 * Format the L2 bigblock representing a 4 TB chunk.
 *
 * Returns the number of free bigblocks.
 */
static uint64_t
hammer_format_layer2_chunk(struct hammer_mount *hmp,
	hammer_transaction_t trans,
	hammer_off_t phys_offset,
	hammer_off_t aligned_buf_end_off,
	hammer_buffer_t *bufferp,
	int *errorp)
{
	uint64_t free_bigblocks = 0;
	hammer_off_t block_off;
	hammer_off_t layer2_offset;
	struct hammer_blockmap_layer2 *layer2;

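	/*
	 * Walk the layer2 entries covering this chunk, one entry per
	 * 8 MB bigblock.
	 */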
	for (block_off = 0;
	     block_off < HAMMER_BLOCKMAP_LAYER2;
	     block_off += HAMMER_LARGEBLOCK_SIZE) {
		layer2_offset = phys_offset +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
		layer2 = hammer_bread(hmp, layer2_offset, errorp, bufferp);
		if (*errorp)
			return (free_bigblocks);

		KKASSERT(layer2);

		hammer_modify_buffer(trans, *bufferp, layer2, sizeof(*layer2));
		bzero(layer2, sizeof(*layer2));

		if (block_off == 0) {
			/*
			 * The first entry represents the L2 bigblock itself.
			 */
			layer2->zone = HAMMER_ZONE_FREEMAP_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		} else if (phys_offset + block_off < aligned_buf_end_off) {
			layer2->zone = 0;
			layer2->append_off = 0;
			layer2->bytes_free = HAMMER_LARGEBLOCK_SIZE;
			++free_bigblocks;
		} else {
			/*
			 * Bigblock outside of physically available space
			 */
			layer2->zone = HAMMER_ZONE_UNAVAIL_INDEX;
			layer2->append_off = HAMMER_LARGEBLOCK_SIZE;
			layer2->bytes_free = 0;
		}
		layer2->entry_crc = crc32(layer2, HAMMER_LAYER2_CRCSIZE);

		hammer_modify_buffer_done(*bufferp);
	}

	return (free_bigblocks);
}

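/*
 * Initialize the layer1 freemap entry covering the 4 TB chunk at
 * phys_offset.  The entry records the number of free bigblocks and
 * points at the chunk's (already formatted) layer2 bigblock.
 */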
static void
hammer_set_layer1_entry(struct hammer_mount *hmp,
	hammer_transaction_t trans,
	hammer_off_t phys_offset,
	uint64_t free_bigblocks,
	hammer_blockmap_t freemap,
	hammer_buffer_t *bufferp,
	int *errorp)
{
	struct hammer_blockmap_layer1 *layer1;
	hammer_off_t layer1_offset;

	layer1_offset = freemap->phys_offset +
			HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_offset);
	layer1 = hammer_bread(hmp, layer1_offset, errorp, bufferp);
	if (*errorp)
		return;
	KKASSERT(layer1);
	KKASSERT(layer1->phys_offset == HAMMER_BLOCKMAP_UNAVAIL);

	hammer_modify_buffer(trans, *bufferp, layer1, sizeof(*layer1));
	bzero(layer1, sizeof(*layer1));
	layer1->phys_offset = phys_offset;
	layer1->blocks_free = free_bigblocks;
	layer1->layer1_crc = crc32(layer1, HAMMER_LAYER1_CRCSIZE);

	hammer_modify_buffer_done(*bufferp);
}

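/*
 * Look up the given device path and open its vnode, making sure it
 * refers to a disk device that is not mounted or otherwise in use.
 */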
static int
hammer_setup_device(struct vnode **devvpp, const char *dev_path, int ronly)
{
	int error;
	struct nlookupdata nd;

	/*
	 * Get the device vnode
	 */
	if (*devvpp == NULL) {
		error = nlookup_init(&nd, dev_path, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, devvpp);
		nlookup_done(&nd);
	} else {
		error = 0;
	}

	if (error == 0) {
		if (vn_isdisk(*devvpp, &error)) {
			error = vfs_mountedon(*devvpp);
		}
	}
	if (error == 0 && vcount(*devvpp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(*devvpp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(*devvpp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(*devvpp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(*devvpp);
	}
	if (error && *devvpp) {
		vrele(*devvpp);
		*devvpp = NULL;
	}
	return (error);
}

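/*
 * Undo hammer_setup_device(): flush and close the device vnode and
 * drop our reference to it.
 */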
static void
hammer_close_device(struct vnode **devvpp, int ronly)
{
	if (*devvpp) {
		vinvalbuf(*devvpp, ronly ? 0 : V_SAVE, 0, 0);
		VOP_CLOSE(*devvpp, (ronly ? FREAD : FREAD|FWRITE));
		vrele(*devvpp);
		*devvpp = NULL;
	}
}

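/*
 * Write a fresh volume header to the device, carrying over the
 * filesystem id, fstype and version of the root volume.  Fails if
 * the device already carries a valid HAMMER signature.
 */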
static int
hammer_format_volume_header(struct hammer_mount *hmp, struct vnode *devvp,
	const char *vol_name, int vol_no, int vol_count,
	int64_t vol_size, int64_t boot_area_size, int64_t mem_area_size)
{
	struct buf *bp = NULL;
	struct hammer_volume_ondisk *ondisk;
	int error;

	/*
	 * Read what is currently on the device so we can check it
	 * before overwriting it with a new volume header.
	 */
	KKASSERT(HAMMER_BUFSIZE >= sizeof(struct hammer_volume_ondisk));
	error = bread(devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error || bp->b_bcount < sizeof(struct hammer_volume_ondisk))
		goto late_failure;

	ondisk = (struct hammer_volume_ondisk*) bp->b_data;

	/*
	 * Note that we do NOT allow the use of a device that already
	 * contains a valid HAMMER signature.  It has to be erased
	 * with dd first.
	 */
	if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_expand: Formatting of valid HAMMER volume "
			"%s denied. Erase with dd!\n", vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	bzero(ondisk, sizeof(struct hammer_volume_ondisk));
	ksnprintf(ondisk->vol_name, sizeof(ondisk->vol_name), "%s", vol_name);
	ondisk->vol_fstype = hmp->rootvol->ondisk->vol_fstype;
	ondisk->vol_signature = HAMMER_FSBUF_VOLUME;
	ondisk->vol_fsid = hmp->fsid;
	ondisk->vol_rootvol = hmp->rootvol->vol_no;
	ondisk->vol_no = vol_no;
	ondisk->vol_count = vol_count;
	ondisk->vol_version = hmp->version;

	/*
	 * Reserve space for (future) header junk, setup our poor-man's
	 * bigblock allocator.
	 */
	int64_t vol_alloc = HAMMER_BUFSIZE * 16;

	ondisk->vol_bot_beg = vol_alloc;
	vol_alloc += boot_area_size;
	ondisk->vol_mem_beg = vol_alloc;
	vol_alloc += mem_area_size;

	/*
	 * The remaining area is the zone 2 buffer allocation area.
	 */
	ondisk->vol_buf_beg = vol_alloc;
	ondisk->vol_buf_end = vol_size & ~(int64_t)HAMMER_BUFMASK;

	if (ondisk->vol_buf_end < ondisk->vol_buf_beg) {
		kprintf("volume %d %s is too small to hold the volume header\n",
			ondisk->vol_no, ondisk->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	ondisk->vol_nblocks = (ondisk->vol_buf_end - ondisk->vol_buf_beg) /
			      HAMMER_BUFSIZE;
	ondisk->vol_blocksize = HAMMER_BUFSIZE;

	/*
	 * Write volume header to disk
	 */
	error = bwrite(bp);
	bp = NULL;

late_failure:
	if (bp)
		brelse(bp);
	return (error);
}