2 * Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz
3 * Copyright (c) 1980, 1989, 1993 The Regents of the University of California.
6 * This code is derived from software contributed to Berkeley by
7 * Christoph Herrmann and Thomas-Henning von Kamptz, Munich and Frankfurt.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgment:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors, as well as Christoph
21 * Herrmann and Thomas-Henning von Kamptz.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * $TSHeader: src/sbin/growfs/growfs.c,v 1.5 2000/12/12 19:31:00 tomsoft Exp $
40 * @(#) Copyright (c) 2000 Christoph Herrmann, Thomas-Henning von Kamptz Copyright (c) 1980, 1989, 1993 The Regents of the University of California. All rights reserved.
41 * $FreeBSD: src/sbin/growfs/growfs.c,v 1.4.2.2 2001/08/14 12:45:11 chm Exp $
44 /* ********************************************************** INCLUDES ***** */
45 #include <sys/param.h>
46 #include <sys/diskslice.h>
47 #include <sys/ioctl.h>
58 #include <vfs/ufs/dinode.h>
59 #include <vfs/ufs/fs.h>
63 /* *************************************************** GLOBALS & TYPES ***** */
65 int _dbg_lvl_ = (DL_INFO); /* DL_TRC */
/*
 * Convenience aliases: the superblock and cylinder-group buffers live in
 * unions declared elsewhere in this file; "sblock"/"acg" are the new copies
 * being built, "osblock"/"aocg" the originals read from disk.
 */
72 #define sblock fsun1.fs /* the new superblock */
73 #define osblock fsun2.fs /* the old superblock */
79 #define acg cgun1.cg /* a cylinder cgroup (new) */
80 #define aocg cgun2.cg /* an old cylinder group */
/* Scratch buffers: one data block plus one buffer per indirect-block level. */
82 static char ablk[MAXBSIZE]; /* a block */
83 static char i1blk[MAXBSIZE]; /* some indirect blocks */
84 static char i2blk[MAXBSIZE];
85 static char i3blk[MAXBSIZE];
87 /* where to write back updated blocks */
88 static daddr_t in_src, i1_src, i2_src, i3_src;
90 /* what object contains the reference */
98 static struct csum *fscs; /* cylinder summary */
100 static struct ufs1_dinode zino[MAXBSIZE/sizeof(struct ufs1_dinode)]; /* some inodes */
103 * An array of elements of type struct gfs_bpp describes all blocks to
104 * be relocated in order to free the space needed for the cylinder group
105 * summary for all cylinder groups located in the first cylinder group.
/* Members of struct gfs_bpp (one element per block to be relocated). */
108 daddr_t old; /* old block number */
109 daddr_t new; /* new block number */
110 #define GFS_FL_FIRST 1
111 #define GFS_FL_LAST 2
112 unsigned int flags; /* special handling required */
113 int found; /* how many references were updated */
116 /* ******************************************************** PROTOTYPES ***** */
/*
 * All helpers are file-local (static); each of the larger ones has a
 * matching "***** name *****" section marker further down in the file.
 */
117 static void growfs(int, int, unsigned int);
118 static void rdfs(daddr_t, size_t, void *, int);
119 static void wtfs(daddr_t, size_t, void *, int, unsigned int);
120 static daddr_t alloc(void);
121 static int charsperline(void);
122 static void usage(void);
123 static int isblock(struct fs *, unsigned char *, int);
124 static void clrblock(struct fs *, unsigned char *, int);
125 static void setblock(struct fs *, unsigned char *, int);
126 static void initcg(int, time_t, int, unsigned int);
127 static void updjcg(int, time_t, int, int, unsigned int);
128 static void updcsloc(time_t, int, int, unsigned int);
129 static struct ufs1_dinode *ginode(ino_t, int, int);
130 static void frag_adjust(daddr_t, int);
131 static void cond_bl_upd(ufs_daddr_t *, struct gfs_bpp *,
132 enum pointer_source, int, unsigned int);
133 static void updclst(int);
134 static void updrefs(int, ino_t, struct gfs_bpp *, int, int, unsigned int);
136 /* ************************************************************ growfs ***** */
138 * Here we actually start growing the filesystem. We basically read the
139 * cylinder summary from the first cylinder group as we want to update
140 * this on the fly during our various operations. First we handle the
141 * changes in the former last cylinder group. Afterwards we create all new
142 * cylinder groups. Now we handle the cylinder group containing the
143 * cylinder summary which might result in a relocation of the whole
144 * structure. In the end we write back the updated cylinder summary, the
145 * new superblock, and slightly patched versions of the super block
149 growfs(int fsi, int fso, unsigned int Nflag)
/*
 * fsi/fso: read/write file descriptors for the filesystem device.
 * Nflag: forwarded to wtfs() on every write — NOTE(review): appears to be a
 * "no write"/dry-run flag (see the "nscg update skipped" path in updcsloc);
 * confirm against wtfs().
 */
158 static int randinit=0;
166 #else /* not FSIRAND */
174 * Get the cylinder summary into the memory.
/*
 * The in-core summary is sized for the NEW filesystem (sblock.fs_cssize)
 * and seeded below from the old summary read out of the old layout.
 */
176 fscs = (struct csum *)calloc((size_t)1, (size_t)sblock.fs_cssize);
178 errx(1, "calloc failed");
180 for (i = 0; i < osblock.fs_cssize; i += osblock.fs_bsize) {
181 rdfs(fsbtodb(&osblock, osblock.fs_csaddr +
182 numfrags(&osblock, i)), (size_t)MIN(osblock.fs_cssize - i,
183 osblock.fs_bsize), (void *)(((char *)fscs)+i), fsi);
188 struct csum *dbg_csp;
193 for(dbg_csc=0; dbg_csc<osblock.fs_ncg; dbg_csc++) {
194 snprintf(dbg_line, sizeof(dbg_line),
195 "%d. old csum in old location", dbg_csc);
196 DBG_DUMP_CSUM(&osblock,
201 #endif /* FS_DEBUG */
202 DBG_PRINT0("fscs read\n");
205 * Do all needed changes in the former last cylinder group.
207 updjcg(osblock.fs_ncg-1, utime, fsi, fso, Nflag);
210 * Dump out summary information about file system.
212 printf("growfs:\t%d sectors in %d %s of %d tracks, %d sectors\n",
213 sblock.fs_size * NSPF(&sblock), sblock.fs_ncyl,
214 "cylinders", sblock.fs_ntrak, sblock.fs_nsect);
215 #define B2MBFACTOR (1 / (1024.0 * 1024.0))
216 printf("\t%.1fMB in %d cyl groups (%d c/g, %.2fMB/g, %d i/g)\n",
217 (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR,
218 sblock.fs_ncg, sblock.fs_cpg,
219 (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR,
224 * Now build the cylinders group blocks and
225 * then print out indices of cylinder groups.
227 printf("super-block backups (for fsck -b #) at:\n");
229 width = charsperline();
232 * Iterate for only the new cylinder groups.
234 for (cylno = osblock.fs_ncg; cylno < sblock.fs_ncg; cylno++) {
235 initcg(cylno, utime, fso, Nflag);
236 j = sprintf(tmpbuf, " %d%s",
237 (int)fsbtodb(&sblock, cgsblock(&sblock, cylno)),
238 cylno < (sblock.fs_ncg-1) ? "," : "" );
/* Wrap the backup-superblock list to the terminal width. */
239 if (i + j >= width) {
244 printf("%s", tmpbuf);
250 * Do all needed changes in the first cylinder group.
251 * allocate blocks in new location
253 updcsloc(utime, fsi, fso, Nflag);
256 * Now write the cylinder summary back to disk.
258 for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize) {
259 wtfs(fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)),
260 (size_t)MIN(sblock.fs_cssize - i, sblock.fs_bsize),
261 (void *)(((char *)fscs) + i), fso, Nflag);
263 DBG_PRINT0("fscs written\n");
267 struct csum *dbg_csp;
272 for(dbg_csc=0; dbg_csc<sblock.fs_ncg; dbg_csc++) {
273 snprintf(dbg_line, sizeof(dbg_line),
274 "%d. new csum in new location", dbg_csc);
275 DBG_DUMP_CSUM(&sblock,
280 #endif /* FS_DEBUG */
283 * Now write the new superblock back to disk.
285 sblock.fs_time = utime;
286 wtfs((daddr_t)(SBOFF / DEV_BSIZE), (size_t)SBSIZE, (void *)&sblock,
288 DBG_PRINT0("sblock written\n");
290 "new initial sblock");
293 * Clean up the dynamic fields in our superblock copies.
298 sblock.fs_cgrotor = 0;
300 memset((void *)&sblock.fs_fsmnt, 0, sizeof(sblock.fs_fsmnt));
301 sblock.fs_flags &= FS_DOSOFTDEP;
305 * The following fields are currently distributed from the superblock
313 * fs_flags regarding SOFTPDATES
315 * We probably should rather change the summary for the cylinder group
316 * statistics here to the value of what would be in there, if the file
317 * system were created initially with the new size. Therefore we still
318 * need to find an easy way of calculating that.
319 * Possibly we can try to read the first superblock copy and apply the
320 * "diffed" stats between the old and new superblock by still copying
321 * certain parameters onto that.
325 * Write out the duplicate super blocks.
327 for (cylno = 0; cylno < sblock.fs_ncg; cylno++) {
328 wtfs(fsbtodb(&sblock, cgsblock(&sblock, cylno)),
329 (size_t)SBSIZE, (void *)&sblock, fso, Nflag);
331 DBG_PRINT0("sblock copies written\n");
333 "new other sblocks");
339 /* ************************************************************ initcg ***** */
341 * This creates a new cylinder group structure, for more details please see
342 * the source of newfs(8), as this function is taken over almost unchanged.
343 * As this is never called for the first cylinder group, the special
344 * provisions for that case are removed here.
/*
 * cylno: index of the cylinder group to create (never 0, see above);
 * utime: creation timestamp; fso: write fd; Nflag: forwarded to wtfs().
 */
347 initcg(int cylno, time_t utime, int fso, unsigned int Nflag)
350 daddr_t cbase, d, dlower, dupper, dmax, blkno;
360 * Determine block bounds for cylinder group.
362 cbase = cgbase(&sblock, cylno);
363 dmax = cbase + sblock.fs_fpg;
364 if (dmax > sblock.fs_size) {
365 dmax = sblock.fs_size;
367 dlower = cgsblock(&sblock, cylno) - cbase;
368 dupper = cgdmin(&sblock, cylno) - cbase;
369 if (cylno == 0) { /* XXX fscs may be relocated */
370 dupper += howmany(sblock.fs_cssize, sblock.fs_fsize);
373 memset(&acg, 0, (size_t)sblock.fs_cgsize);
375 acg.cg_magic = CG_MAGIC;
377 if (cylno == sblock.fs_ncg - 1) {
378 acg.cg_ncyl = sblock.fs_ncyl % sblock.fs_cpg;
380 acg.cg_ncyl = sblock.fs_cpg;
382 acg.cg_niblk = sblock.fs_ipg;
383 acg.cg_ndblk = dmax - cbase;
384 if (sblock.fs_contigsumsize > 0) {
385 acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag;
/* Lay out the variable-size tables inside the cg as byte offsets. */
387 acg.cg_btotoff = &acg.cg_space[0] - (u_char *)(&acg.cg_firstfield);
388 acg.cg_boff = acg.cg_btotoff + sblock.fs_cpg * sizeof(int32_t);
389 acg.cg_iusedoff = acg.cg_boff +
390 sblock.fs_cpg * sblock.fs_nrpos * sizeof(u_int16_t);
391 acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, NBBY);
392 if (sblock.fs_contigsumsize <= 0) {
393 acg.cg_nextfreeoff = acg.cg_freeoff +
394 howmany(sblock.fs_cpg* sblock.fs_spc/ NSPF(&sblock), NBBY);
396 acg.cg_clustersumoff = acg.cg_freeoff + howmany
397 (sblock.fs_cpg * sblock.fs_spc / NSPF(&sblock), NBBY) -
399 acg.cg_clustersumoff =
400 roundup(acg.cg_clustersumoff, sizeof(u_int32_t));
401 acg.cg_clusteroff = acg.cg_clustersumoff +
402 (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
403 acg.cg_nextfreeoff = acg.cg_clusteroff + howmany
404 (sblock.fs_cpg * sblock.fs_spc / NSPB(&sblock), NBBY);
406 if (acg.cg_nextfreeoff-(intptr_t)(&acg.cg_firstfield) > sblock.fs_cgsize) {
408 * XXX This should never happen as we would have had that panic
409 * already on filesystem creation
411 errx(37, "panic: cylinder group too big");
413 acg.cg_cs.cs_nifree += sblock.fs_ipg;
/* Inodes below ROOTINO are reserved and never handed out. */
415 for (i = 0; (size_t)i < ROOTINO; i++) {
416 setbit(cg_inosused(&acg), i);
417 acg.cg_cs.cs_nifree--;
419 for (i = 0; i < sblock.fs_ipg / INOPF(&sblock); i += sblock.fs_frag) {
421 for (j = 0; j < sblock.fs_bsize / sizeof(struct ufs1_dinode); j++) {
422 zino[j].di_gen = random();
425 wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno) + i),
426 (size_t)sblock.fs_bsize, (void *)zino, fso, Nflag);
/* Free all whole blocks below the cg metadata area. */
428 for (d = 0; d < dlower; d += sblock.fs_frag) {
429 blkno = d / sblock.fs_frag;
430 setblock(&sblock, cg_blksfree(&acg), blkno);
431 if (sblock.fs_contigsumsize > 0) {
432 setbit(cg_clustersfree(&acg), blkno);
434 acg.cg_cs.cs_nbfree++;
435 cg_blktot(&acg)[cbtocylno(&sblock, d)]++;
436 cg_blks(&sblock, &acg, cbtocylno(&sblock, d))
437 [cbtorpos(&sblock, d)]++;
439 sblock.fs_dsize += dlower;
440 sblock.fs_dsize += acg.cg_ndblk - dupper;
/* Free the leading partial block just above the metadata, if any. */
441 if ((i = dupper % sblock.fs_frag)) {
442 acg.cg_frsum[sblock.fs_frag - i]++;
443 for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) {
444 setbit(cg_blksfree(&acg), dupper);
445 acg.cg_cs.cs_nffree++;
/* Free all remaining whole blocks up to the end of the group. */
448 for (d = dupper; d + sblock.fs_frag <= dmax - cbase; ) {
449 blkno = d / sblock.fs_frag;
450 setblock(&sblock, cg_blksfree(&acg), blkno);
451 if (sblock.fs_contigsumsize > 0) {
452 setbit(cg_clustersfree(&acg), blkno);
454 acg.cg_cs.cs_nbfree++;
455 cg_blktot(&acg)[cbtocylno(&sblock, d)]++;
456 cg_blks(&sblock, &acg, cbtocylno(&sblock, d))
457 [cbtorpos(&sblock, d)]++;
/* Free the trailing partial block at the end of the filesystem, if any. */
460 if (d < dmax - cbase) {
461 acg.cg_frsum[dmax - cbase - d]++;
462 for (; d < dmax - cbase; d++) {
463 setbit(cg_blksfree(&acg), d);
464 acg.cg_cs.cs_nffree++;
/* Rebuild the cluster summary counters from runs of free blocks in the map. */
467 if (sblock.fs_contigsumsize > 0) {
468 int32_t *sump = cg_clustersum(&acg);
469 u_char *mapp = cg_clustersfree(&acg);
474 for (i = 0; i < acg.cg_nclusterblks; i++) {
475 if ((map & bit) != 0) {
477 } else if (run != 0) {
478 if (run > sblock.fs_contigsumsize) {
479 run = sblock.fs_contigsumsize;
484 if ((i & (NBBY - 1)) != (NBBY - 1)) {
492 if (run > sblock.fs_contigsumsize) {
493 run = sblock.fs_contigsumsize;
/* Fold this group's statistics into the filesystem totals. */
498 sblock.fs_cstotal.cs_ndir += acg.cg_cs.cs_ndir;
499 sblock.fs_cstotal.cs_nffree += acg.cg_cs.cs_nffree;
500 sblock.fs_cstotal.cs_nbfree += acg.cg_cs.cs_nbfree;
501 sblock.fs_cstotal.cs_nifree += acg.cg_cs.cs_nifree;
503 wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)),
504 (size_t)sblock.fs_bsize, (void *)&acg, fso, Nflag);
513 /* ******************************************************* frag_adjust ***** */
515 * Here we add or subtract (sign +1/-1) the available fragments in a given
516 * block to or from the fragment statistics. By subtracting before and adding
517 * after an operation on the free frag map we can easily update the fragment
518 * statistic, which seems to be otherwise a rather complex operation.
/*
 * frag: any fragment inside the block whose free-fragment runs are to be
 * rescanned; sign: +1 adds the runs found in cg_blksfree to acg.cg_frsum,
 * -1 subtracts them.
 */
521 frag_adjust(daddr_t frag, int sign)
523 DBG_FUNC("frag_adjust")
531 * Here frag only needs to point to any fragment in the block we want
/* Scan every fragment of the block containing "frag". */
534 for(f=rounddown(frag, sblock.fs_frag);
535 f<roundup(frag+1, sblock.fs_frag);
538 * Count contiguous free fragments.
540 if(isset(cg_blksfree(&acg), f)) {
543 if(fragsize && fragsize<sblock.fs_frag) {
545 * We found something in between.
547 acg.cg_frsum[fragsize]+=sign;
548 DBG_PRINT2("frag_adjust [%d]+=%d\n",
/* Account for a run that extends to the end of the block. */
555 if(fragsize && fragsize<sblock.fs_frag) {
557 * We found something.
559 acg.cg_frsum[fragsize]+=sign;
560 DBG_PRINT2("frag_adjust [%d]+=%d\n",
564 DBG_PRINT2("frag_adjust [[%d]]+=%d\n",
572 /* ******************************************************* cond_bl_upd ***** */
574 * Here we conditionally update a pointer to a fragment. We check for all
575 * relocated blocks if any of its fragments is referenced by the current
576 * field, and update the pointer to the respective fragment in our new
577 * block. If we find a reference we write back the block immediately,
578 * as there is no easy way for our general block reading engine to figure
579 * out if a write back operation is needed.
/*
 * block: disk-address field that may reference a relocated block;
 * field: array of relocation records, terminated by an entry with old == 0;
 * source: which scratch buffer holds the object containing *block (inode
 * block or indirect block level 1..3), selecting the write-back target;
 * fso/Nflag: forwarded to wtfs() for the immediate write-back.
 */
582 cond_bl_upd(ufs_daddr_t *block, struct gfs_bpp *field,
583 enum pointer_source source, int fso, unsigned int Nflag)
585 DBG_FUNC("cond_bl_upd")
593 while(f->old) { /* for all old blocks */
594 if(*block/sblock.fs_frag == f->old) {
596 * The fragment is part of the block, so update.
/* Keep the fragment offset, swap the block number old -> new. */
598 *block=(f->new*sblock.fs_frag+(*block%sblock.fs_frag));
600 DBG_PRINT3("scg (%d->%d)[%d] reference updated\n",
603 *block%sblock.fs_frag);
605 /* Write the block back to disk immediately */
611 case GFS_PS_IND_BLK_LVL1:
615 case GFS_PS_IND_BLK_LVL2:
619 case GFS_PS_IND_BLK_LVL3:
629 * XXX If src is not of type inode we have to
630 * implement copy on write here in case
631 * of active snapshots.
633 wtfs(dst, (size_t)sblock.fs_bsize, (void *)src,
638 * The same block can't be found again in this loop.
649 /* ************************************************************ updjcg ***** */
651 * Here we do all needed work for the former last cylinder group. It has to be
652 * changed in any case, even if the filesystem ended exactly on the end of
653 * this group, as there is some slightly inconsistent handling of the number
654 * of cylinders in the cylinder group. We start again by reading the cylinder
655 * group from disk. If the last block was not fully available, we first handle
656 * the missing fragments, then we handle all new full blocks in that file
657 * system and finally we handle the new last fragmented block in the file
658 * system. We again have to handle the fragment statistics rotational layout
659 * tables and cluster summary during all those operations.
/*
 * cylno: the former last cylinder group (caller passes osblock.fs_ncg-1);
 * utime: timestamp — NOTE(review): no use of utime is visible in this
 * excerpt, confirm against the full source; fsi/fso: read/write fds;
 * Nflag: forwarded to wtfs().
 */
662 updjcg(int cylno, time_t utime, int fsi, int fso, unsigned int Nflag)
665 daddr_t cbase, dmax, dupper;
673 * Read the former last (joining) cylinder group from disk, and make
676 rdfs(fsbtodb(&osblock, cgtod(&osblock, cylno)),
677 (size_t)osblock.fs_cgsize, (void *)&aocg, fsi);
678 DBG_PRINT0("jcg read\n");
683 memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2));
686 * If the cylinder group had already its new final size almost
687 * nothing is to be done ... except:
688 * For some reason the value of cg_ncyl in the last cylinder group has
689 * to be zero instead of fs_cpg. As this is now no longer the last
690 * cylinder group we have to change that value now to fs_cpg.
693 if(cgbase(&osblock, cylno+1) == osblock.fs_size) {
694 acg.cg_ncyl=sblock.fs_cpg;
696 wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)),
697 (size_t)sblock.fs_cgsize, (void *)&acg, fso, Nflag);
698 DBG_PRINT0("jcg written\n");
708 * Set up some variables needed later.
710 cbase = cgbase(&sblock, cylno);
711 dmax = cbase + sblock.fs_fpg;
712 if (dmax > sblock.fs_size)
713 dmax = sblock.fs_size;
714 dupper = cgdmin(&sblock, cylno) - cbase;
715 if (cylno == 0) { /* XXX fscs may be relocated */
716 dupper += howmany(sblock.fs_cssize, sblock.fs_fsize);
720 * Set pointer to the cylinder summary for our cylinder group.
725 * Touch the cylinder group, update all fields in the cylinder group as
726 * needed, update the free space in the superblock.
729 if (cylno == sblock.fs_ncg - 1) {
731 * This is still the last cylinder group.
733 acg.cg_ncyl = sblock.fs_ncyl % sblock.fs_cpg;
735 acg.cg_ncyl = sblock.fs_cpg;
737 DBG_PRINT4("jcg dbg: %d %u %d %u\n",
742 acg.cg_ndblk = dmax - cbase;
743 sblock.fs_dsize += acg.cg_ndblk-aocg.cg_ndblk;
744 if (sblock.fs_contigsumsize > 0) {
745 acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag;
749 * Now we have to update the free fragment bitmap for our new free
750 * space. There again we have to handle the fragmentation and also
751 * the rotational layout tables and the cluster summary. This is
752 * also done per fragment for the first new block if the old file
753 * system end was not on a block boundary, per fragment for the new
754 * last block if the new file system end is not on a block boundary,
755 * and per block for all space in between.
757 * Handle the first new block here if it was partially available
760 if(osblock.fs_size % sblock.fs_frag) {
761 if(roundup(osblock.fs_size, sblock.fs_frag)<=sblock.fs_size) {
763 * The new space is enough to fill at least this
/* Free the fragments that complete the old partial last block. */
767 for(i=roundup(osblock.fs_size-cbase, sblock.fs_frag)-1;
768 i>=osblock.fs_size-cbase;
770 setbit(cg_blksfree(&acg), i);
771 acg.cg_cs.cs_nffree++;
776 * Check if the fragment just created could join an
777 * already existing fragment at the former end of the
780 if(isblock(&sblock, cg_blksfree(&acg),
781 ((osblock.fs_size - cgbase(&sblock, cylno))/
784 * The block is now completely available
786 DBG_PRINT0("block was\n");
/* Convert the fragment statistics to one whole free block. */
787 acg.cg_frsum[osblock.fs_size%sblock.fs_frag]--;
788 acg.cg_cs.cs_nbfree++;
789 acg.cg_cs.cs_nffree-=sblock.fs_frag;
790 k=rounddown(osblock.fs_size-cbase,
792 cg_blktot(&acg)[cbtocylno(&sblock, k)]++;
793 cg_blks(&sblock, &acg, cbtocylno(&sblock, k))
794 [cbtorpos(&sblock, k)]++;
795 updclst((osblock.fs_size-cbase)/sblock.fs_frag);
798 * Lets rejoin a possible partially grown
802 while(isset(cg_blksfree(&acg), i) &&
803 (i>=rounddown(osblock.fs_size-cbase,
815 * We only grow by some fragments within this last
/* Free just the added fragments of the (still partial) last block. */
818 for(i=sblock.fs_size-cbase-1;
819 i>=osblock.fs_size-cbase;
821 setbit(cg_blksfree(&acg), i);
822 acg.cg_cs.cs_nffree++;
826 * Lets rejoin a possible partially grown fragment.
829 while(isset(cg_blksfree(&acg), i) &&
830 (i>=rounddown(osblock.fs_size-cbase,
843 * Handle all new complete blocks here.
845 for(i=roundup(osblock.fs_size-cbase, sblock.fs_frag);
846 i+sblock.fs_frag<=dmax-cbase; /* XXX <= or only < ? */
848 j = i / sblock.fs_frag;
849 setblock(&sblock, cg_blksfree(&acg), j);
851 acg.cg_cs.cs_nbfree++;
852 cg_blktot(&acg)[cbtocylno(&sblock, i)]++;
853 cg_blks(&sblock, &acg, cbtocylno(&sblock, i))
854 [cbtorpos(&sblock, i)]++;
858 * Handle the last new block if there are still some new fragments left.
859 * Here we don't have to bother about the cluster summary or even
860 * the rotational layout table.
862 if (i < (dmax - cbase)) {
863 acg.cg_frsum[dmax - cbase - i]++;
864 for (; i < dmax - cbase; i++) {
865 setbit(cg_blksfree(&acg), i);
866 acg.cg_cs.cs_nffree++;
/* Propagate the per-group deltas into the filesystem totals. */
870 sblock.fs_cstotal.cs_nffree +=
871 (acg.cg_cs.cs_nffree - aocg.cg_cs.cs_nffree);
872 sblock.fs_cstotal.cs_nbfree +=
873 (acg.cg_cs.cs_nbfree - aocg.cg_cs.cs_nbfree);
875 * The following statistics are not changed here:
876 * sblock.fs_cstotal.cs_ndir
877 * sblock.fs_cstotal.cs_nifree
878 * As the statistics for this cylinder group are ready, copy it to
879 * the summary information array.
884 * Write the updated "joining" cylinder group back to disk.
886 wtfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), (size_t)sblock.fs_cgsize,
887 (void *)&acg, fso, Nflag);
888 DBG_PRINT0("jcg written\n");
897 /* ********************************************************** updcsloc ***** */
899 * Here we update the location of the cylinder summary. We have two possible
900 * ways of growing the cylinder summary.
901 * (1) We can try to grow the summary in the current location, and relocate
902 * possibly used blocks within the current cylinder group.
903 * (2) Alternatively we can relocate the whole cylinder summary to the first
904 * new completely empty cylinder group. Once the cylinder summary is no
905 * longer in the beginning of the first cylinder group you should never
906 * use a version of fsck which is not aware of the possibility to have
907 * this structure in a non standard place.
908 * Option (1) is considered to be less intrusive to the structure of the file-
909 * system. So we try to stick to that whenever possible. If there is not enough
910 * space in the cylinder group containing the cylinder summary we have to use
911 * method (2). In case of active snapshots in the filesystem we probably can
912 * completely avoid implementing copy on write if we stick to method (2) only.
915 updcsloc(time_t utime, int fsi, int fso, unsigned int Nflag)
921 daddr_t cbase, dupper, odupper, d, f, g;
931 if(howmany(sblock.fs_cssize, sblock.fs_fsize) ==
932 howmany(osblock.fs_cssize, osblock.fs_fsize)) {
934 * No new fragment needed.
939 ocscg=dtog(&osblock, osblock.fs_csaddr);
941 blocks = 1+howmany(sblock.fs_cssize, sblock.fs_bsize)-
942 howmany(osblock.fs_cssize, osblock.fs_bsize);
945 * Read original cylinder group from disk, and make a copy.
946 * XXX If Nflag is set in some very rare cases we now miss
947 * some changes done in updjcg by reading the unmodified
950 rdfs(fsbtodb(&osblock, cgtod(&osblock, ocscg)),
951 (size_t)osblock.fs_cgsize, (void *)&aocg, fsi);
952 DBG_PRINT0("oscg read\n");
957 memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2));
960 * Touch the cylinder group, set up local variables needed later
961 * and update the superblock.
966 * XXX In the case of having active snapshots we may need much more
967 * blocks for the copy on write. We need each block twice, and
968 * also up to 8*3 blocks for indirect blocks for all possible
971 if(/*((int)sblock.fs_time&0x3)>0||*/ cs->cs_nbfree < blocks) {
973 * There is not enough space in the old cylinder group to
974 * relocate all blocks as needed, so we relocate the whole
975 * cylinder group summary to a new group. We try to use the
976 * first complete new cylinder group just created. Within the
977 * cylinder group we allign the area immediately after the
978 * cylinder group information location in order to be as
979 * close as possible to the original implementation of ffs.
981 * First we have to make sure we'll find enough space in the
982 * new cylinder group. If not, then we currently give up.
983 * We start with freeing everything which was used by the
984 * fragments of the old cylinder summary in the current group.
985 * Now we write back the group meta data, read in the needed
986 * meta data from the new cylinder group, and start allocating
987 * within that group. Here we can assume, the group to be
988 * completely empty. Which makes the handling of fragments and
989 * clusters a lot easier.
992 if(sblock.fs_ncg-osblock.fs_ncg < 2) {
993 errx(2, "panic: not enough space");
997 * Point "d" to the first fragment not used by the cylinder
1000 d=osblock.fs_csaddr+(osblock.fs_cssize/osblock.fs_fsize);
1003 * Set up last cluster size ("lcs") already here. Calculate
1004 * the size for the trailing cluster just behind where "d"
1007 if(sblock.fs_contigsumsize > 0) {
1008 for(block=howmany(d%sblock.fs_fpg, sblock.fs_frag),
1009 lcs=0; lcs<sblock.fs_contigsumsize;
1011 if(isclr(cg_clustersfree(&acg), block)){
1018 * Point "d" to the last frag used by the cylinder summary.
1022 DBG_PRINT1("d=%d\n",
1024 if((d+1)%sblock.fs_frag) {
1026 * The end of the cylinder summary is not a complete
1030 frag_adjust(d%sblock.fs_fpg, -1);
1031 for(; (d+1)%sblock.fs_frag; d--) {
1032 DBG_PRINT1("d=%d\n",
1034 setbit(cg_blksfree(&acg), d%sblock.fs_fpg);
1035 acg.cg_cs.cs_nffree++;
1036 sblock.fs_cstotal.cs_nffree++;
1039 * Point "d" to the last fragment of the last
1040 * (incomplete) block of the clinder summary.
1043 frag_adjust(d%sblock.fs_fpg, 1);
1045 if(isblock(&sblock, cg_blksfree(&acg),
1046 (d%sblock.fs_fpg)/sblock.fs_frag)) {
1047 DBG_PRINT1("d=%d\n",
1049 acg.cg_cs.cs_nffree-=sblock.fs_frag;
1050 acg.cg_cs.cs_nbfree++;
1051 sblock.fs_cstotal.cs_nffree-=sblock.fs_frag;
1052 sblock.fs_cstotal.cs_nbfree++;
1053 cg_blktot(&acg)[cbtocylno(&sblock,
1054 d%sblock.fs_fpg)]++;
1055 cg_blks(&sblock, &acg, cbtocylno(&sblock,
1056 d%sblock.fs_fpg))[cbtorpos(&sblock,
1057 d%sblock.fs_fpg)]++;
1058 if(sblock.fs_contigsumsize > 0) {
1059 setbit(cg_clustersfree(&acg),
1060 (d%sblock.fs_fpg)/sblock.fs_frag);
1061 if(lcs < sblock.fs_contigsumsize) {
1067 cg_clustersum(&acg)[lcs]++;
1072 * Point "d" to the first fragment of the block before
1073 * the last incomplete block.
1078 DBG_PRINT1("d=%d\n",
1080 for(d=rounddown(d, sblock.fs_frag); d >= osblock.fs_csaddr;
1081 d-=sblock.fs_frag) {
1083 DBG_PRINT1("d=%d\n",
1085 setblock(&sblock, cg_blksfree(&acg),
1086 (d%sblock.fs_fpg)/sblock.fs_frag);
1087 acg.cg_cs.cs_nbfree++;
1088 sblock.fs_cstotal.cs_nbfree++;
1089 cg_blktot(&acg)[cbtocylno(&sblock, d%sblock.fs_fpg)]++;
1090 cg_blks(&sblock, &acg, cbtocylno(&sblock,
1091 d%sblock.fs_fpg))[cbtorpos(&sblock,
1092 d%sblock.fs_fpg)]++;
1093 if(sblock.fs_contigsumsize > 0) {
1094 setbit(cg_clustersfree(&acg),
1095 (d%sblock.fs_fpg)/sblock.fs_frag);
1097 * The last cluster size is already set up.
1099 if(lcs < sblock.fs_contigsumsize) {
1101 cg_clustersum(&acg)[lcs]--;
1104 cg_clustersum(&acg)[lcs]++;
1111 * Now write the former cylinder group containing the cylinder
1112 * summary back to disk.
1114 wtfs(fsbtodb(&sblock, cgtod(&sblock, ocscg)),
1115 (size_t)sblock.fs_cgsize, (void *)&acg, fso, Nflag);
1116 DBG_PRINT0("oscg written\n");
1117 DBG_DUMP_CG(&sblock,
1122 * Find the beginning of the new cylinder group containing the
1125 sblock.fs_csaddr=cgdmin(&sblock, osblock.fs_ncg);
1126 ncscg=dtog(&sblock, sblock.fs_csaddr);
1131 * If Nflag is specified, we would now read random data instead
1132 * of an empty cg structure from disk. So we can't simulate that
1136 DBG_PRINT0("nscg update skipped\n");
1142 * Read the future cylinder group containing the cylinder
1143 * summary from disk, and make a copy.
1145 rdfs(fsbtodb(&sblock, cgtod(&sblock, ncscg)),
1146 (size_t)sblock.fs_cgsize, (void *)&aocg, fsi);
1147 DBG_PRINT0("nscg read\n");
1148 DBG_DUMP_CG(&sblock,
1152 memcpy((void *)&cgun1, (void *)&cgun2, sizeof(cgun2));
1155 * Allocate all complete blocks used by the new cylinder
1158 for(d=sblock.fs_csaddr; d+sblock.fs_frag <=
1159 sblock.fs_csaddr+(sblock.fs_cssize/sblock.fs_fsize);
1160 d+=sblock.fs_frag) {
1161 clrblock(&sblock, cg_blksfree(&acg),
1162 (d%sblock.fs_fpg)/sblock.fs_frag);
1163 acg.cg_cs.cs_nbfree--;
1164 sblock.fs_cstotal.cs_nbfree--;
1165 cg_blktot(&acg)[cbtocylno(&sblock, d%sblock.fs_fpg)]--;
1166 cg_blks(&sblock, &acg, cbtocylno(&sblock,
1167 d%sblock.fs_fpg))[cbtorpos(&sblock,
1168 d%sblock.fs_fpg)]--;
1169 if(sblock.fs_contigsumsize > 0) {
1170 clrbit(cg_clustersfree(&acg),
1171 (d%sblock.fs_fpg)/sblock.fs_frag);
1176 * Allocate all fragments used by the cylinder summary in the
1179 if(d<sblock.fs_csaddr+(sblock.fs_cssize/sblock.fs_fsize)) {
1180 for(; d-sblock.fs_csaddr<
1181 sblock.fs_cssize/sblock.fs_fsize;
1183 clrbit(cg_blksfree(&acg), d%sblock.fs_fpg);
1184 acg.cg_cs.cs_nffree--;
1185 sblock.fs_cstotal.cs_nffree--;
1187 acg.cg_cs.cs_nbfree--;
1188 acg.cg_cs.cs_nffree+=sblock.fs_frag;
1189 sblock.fs_cstotal.cs_nbfree--;
1190 sblock.fs_cstotal.cs_nffree+=sblock.fs_frag;
1191 cg_blktot(&acg)[cbtocylno(&sblock, d%sblock.fs_fpg)]--;
1192 cg_blks(&sblock, &acg, cbtocylno(&sblock,
1193 d%sblock.fs_fpg))[cbtorpos(&sblock,
1194 d%sblock.fs_fpg)]--;
1195 if(sblock.fs_contigsumsize > 0) {
1196 clrbit(cg_clustersfree(&acg),
1197 (d%sblock.fs_fpg)/sblock.fs_frag);
1200 frag_adjust(d%sblock.fs_fpg, +1);
1203 * XXX Handle the cluster statistics here in the case this
1204 * cylinder group is now almost full, and the remaining
1205 * space is less then the maximum cluster size. This is
1206 * probably not needed, as you would hardly find a file
1207 * system which has only MAXCSBUFS+FS_MAXCONTIG of free
1208 * space right behind the cylinder group information in
1209 * any new cylinder group.
1213 * Update our statistics in the cylinder summary.
1218 * Write the new cylinder group containing the cylinder summary
1221 wtfs(fsbtodb(&sblock, cgtod(&sblock, ncscg)),
1222 (size_t)sblock.fs_cgsize, (void *)&acg, fso, Nflag);
1223 DBG_PRINT0("nscg written\n");
1224 DBG_DUMP_CG(&sblock,
1232 * We have got enough of space in the current cylinder group, so we
1233 * can relocate just a few blocks, and let the summary information
1234 * grow in place where it is right now.
1238 cbase = cgbase(&osblock, ocscg); /* old and new are equal */
1239 dupper = sblock.fs_csaddr - cbase +
1240 howmany(sblock.fs_cssize, sblock.fs_fsize);
1241 odupper = osblock.fs_csaddr - cbase +
1242 howmany(osblock.fs_cssize, osblock.fs_fsize);
1244 sblock.fs_dsize -= dupper-odupper;
1247 * Allocate the space for the array of blocks to be relocated.
1249 bp=(struct gfs_bpp *)malloc(((dupper-odupper)/sblock.fs_frag+2)*
1250 sizeof(struct gfs_bpp));
1252 errx(1, "malloc failed");
1254 memset((char *)bp, 0, ((dupper-odupper)/sblock.fs_frag+2)*
1255 sizeof(struct gfs_bpp));
1258 * Lock all new frags needed for the cylinder group summary. This is
1259 * done per fragment in the first and last block of the new required
1260 * area, and per block for all other blocks.
1262 * Handle the first new block here (but only if some fragments where
1263 * already used for the cylinder summary).
1266 frag_adjust(odupper, -1);
1267 for(d=odupper; ((d<dupper)&&(d%sblock.fs_frag)); d++) {
1268 DBG_PRINT1("scg first frag check loop d=%d\n",
1270 if(isclr(cg_blksfree(&acg), d)) {
1272 bp[ind].old=d/sblock.fs_frag;
1273 bp[ind].flags|=GFS_FL_FIRST;
1274 if(roundup(d, sblock.fs_frag) >= dupper) {
1275 bp[ind].flags|=GFS_FL_LAST;
1280 clrbit(cg_blksfree(&acg), d);
1281 acg.cg_cs.cs_nffree--;
1282 sblock.fs_cstotal.cs_nffree--;
1285 * No cluster handling is needed here, as there was at least
1286 * one fragment in use by the cylinder summary in the old
1288 * No block-free counter handling here as this block was not
1292 frag_adjust(odupper, 1);
1295 * Handle all needed complete blocks here.
1297 for(; d+sblock.fs_frag<=dupper; d+=sblock.fs_frag) {
1298 DBG_PRINT1("scg block check loop d=%d\n",
1300 if(!isblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag)) {
1301 for(f=d; f<d+sblock.fs_frag; f++) {
1302 if(isset(cg_blksfree(&aocg), f)) {
1303 acg.cg_cs.cs_nffree--;
1304 sblock.fs_cstotal.cs_nffree--;
1307 clrblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag);
1308 bp[ind].old=d/sblock.fs_frag;
1311 clrblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag);
1312 acg.cg_cs.cs_nbfree--;
1313 sblock.fs_cstotal.cs_nbfree--;
1314 cg_blktot(&acg)[cbtocylno(&sblock, d)]--;
1315 cg_blks(&sblock, &acg, cbtocylno(&sblock, d))
1316 [cbtorpos(&sblock, d)]--;
1317 if(sblock.fs_contigsumsize > 0) {
1318 clrbit(cg_clustersfree(&acg), d/sblock.fs_frag);
1319 for(lcs=0, l=(d/sblock.fs_frag)+1;
1320 lcs<sblock.fs_contigsumsize;
1322 if(isclr(cg_clustersfree(&acg),l)){
1326 if(lcs < sblock.fs_contigsumsize) {
1327 cg_clustersum(&acg)[lcs+1]--;
1329 cg_clustersum(&acg)[lcs]++;
1335 * No fragment counter handling is needed here, as this finally
1336 * doesn't change after the relocation.
1341 * Handle all fragments needed in the last new affected block.
1344 frag_adjust(dupper-1, -1);
1346 if(isblock(&sblock, cg_blksfree(&acg), d/sblock.fs_frag)) {
1347 acg.cg_cs.cs_nbfree--;
1348 sblock.fs_cstotal.cs_nbfree--;
1349 acg.cg_cs.cs_nffree+=sblock.fs_frag;
1350 sblock.fs_cstotal.cs_nffree+=sblock.fs_frag;
1351 cg_blktot(&acg)[cbtocylno(&sblock, d)]--;
1352 cg_blks(&sblock, &acg, cbtocylno(&sblock, d))
1353 [cbtorpos(&sblock, d)]--;
1354 if(sblock.fs_contigsumsize > 0) {
1355 clrbit(cg_clustersfree(&acg), d/sblock.fs_frag);
1356 for(lcs=0, l=(d/sblock.fs_frag)+1;
1357 lcs<sblock.fs_contigsumsize;
1359 if(isclr(cg_clustersfree(&acg),l)){
1363 if(lcs < sblock.fs_contigsumsize) {
1364 cg_clustersum(&acg)[lcs+1]--;
1366 cg_clustersum(&acg)[lcs]++;
1372 for(; d<dupper; d++) {
1373 DBG_PRINT1("scg second frag check loop d=%d\n",
1375 if(isclr(cg_blksfree(&acg), d)) {
1376 bp[ind].old=d/sblock.fs_frag;
1377 bp[ind].flags|=GFS_FL_LAST;
1379 clrbit(cg_blksfree(&acg), d);
1380 acg.cg_cs.cs_nffree--;
1381 sblock.fs_cstotal.cs_nffree--;
1384 if(bp[ind].flags & GFS_FL_LAST) { /* we have to advance here */
1387 frag_adjust(dupper-1, 1);
1391 * If we found a block to relocate just do so.
1394 for(i=0; i<ind; i++) {
1395 if(!bp[i].old) { /* no more blocks listed */
1397 * XXX A relative blocknumber should not be
1398 * zero, which is not explicitly
1399 * guaranteed by our code.
1404 * Allocate a complete block in the same (current)
1407 bp[i].new=alloc()/sblock.fs_frag;
1410 * There is no frag_adjust() needed for the new block
1411 * as it will have no fragments yet :-).
1413 for(f=bp[i].old*sblock.fs_frag,
1414 g=bp[i].new*sblock.fs_frag;
1415 f<(bp[i].old+1)*sblock.fs_frag;
1417 if(isset(cg_blksfree(&aocg), f)) {
1418 setbit(cg_blksfree(&acg), g);
1419 acg.cg_cs.cs_nffree++;
1420 sblock.fs_cstotal.cs_nffree++;
1425 * Special handling is required if this was the first
1426 * block. We have to consider the fragments which were
1427 * used by the cylinder summary in the original block
1428 * which are to be free in the copy of our block. We
1429 * have to be careful if this first block happens to
1430 * be also the last block to be relocated.
1432 if(bp[i].flags & GFS_FL_FIRST) {
1433 for(f=bp[i].old*sblock.fs_frag,
1434 g=bp[i].new*sblock.fs_frag;
1437 setbit(cg_blksfree(&acg), g);
1438 acg.cg_cs.cs_nffree++;
1439 sblock.fs_cstotal.cs_nffree++;
1441 if(!(bp[i].flags & GFS_FL_LAST)) {
1442 frag_adjust(bp[i].new*sblock.fs_frag,1);
1448 * Special handling is required if this is the last
1449 * block to be relocated.
1451 if(bp[i].flags & GFS_FL_LAST) {
1452 frag_adjust(bp[i].new*sblock.fs_frag, 1);
1453 frag_adjust(bp[i].old*sblock.fs_frag, -1);
1455 f<roundup(dupper, sblock.fs_frag);
1457 if(isclr(cg_blksfree(&acg), f)) {
1458 setbit(cg_blksfree(&acg), f);
1459 acg.cg_cs.cs_nffree++;
1460 sblock.fs_cstotal.cs_nffree++;
1463 frag_adjust(bp[i].old*sblock.fs_frag, 1);
1467 * !!! Attach the cylindergroup offset here.
1469 bp[i].old+=cbase/sblock.fs_frag;
1470 bp[i].new+=cbase/sblock.fs_frag;
1473 * Copy the content of the block.
1476 * XXX Here we will have to implement a copy on write
1477 * in the case we have any active snapshots.
1479 rdfs(fsbtodb(&sblock, bp[i].old*sblock.fs_frag),
1480 (size_t)sblock.fs_bsize, (void *)&ablk, fsi);
1481 wtfs(fsbtodb(&sblock, bp[i].new*sblock.fs_frag),
1482 (size_t)sblock.fs_bsize, (void *)&ablk, fso, Nflag);
1483 DBG_DUMP_HEX(&sblock,
1484 "copied full block",
1485 (unsigned char *)&ablk);
1487 DBG_PRINT2("scg (%d->%d) block relocated\n",
1493 * Now we have to update all references to any fragment which
1494 * belongs to any block relocated. We iterate now over all
1495 * cylinder groups, within those over all non zero length
1498 for(cylno=0; cylno<osblock.fs_ncg; cylno++) {
1499 DBG_PRINT1("scg doing cg (%d)\n",
1501 for(inc=osblock.fs_ipg-1 ; inc>=0 ; inc--) {
1502 updrefs(cylno, (ino_t)inc, bp, fsi, fso, Nflag);
1507 * All inodes are checked, now make sure the number of
1508 * references found make sense.
1510 for(i=0; i<ind; i++) {
1511 if(!bp[i].found || (bp[i].found>sblock.fs_frag)) {
1512 warnx("error: %d refs found for block %d.",
1513 bp[i].found, bp[i].old);
1519 * The following statistics are not changed here:
1520 * sblock.fs_cstotal.cs_ndir
1521 * sblock.fs_cstotal.cs_nifree
1522 * The following statistics were already updated on the fly:
1523 * sblock.fs_cstotal.cs_nffree
1524 * sblock.fs_cstotal.cs_nbfree
1525 * As the statistics for this cylinder group are ready, copy it to
1526 * the summary information array.
1532 * Write summary cylinder group back to disk.
1534 wtfs(fsbtodb(&sblock, cgtod(&sblock, ocscg)), (size_t)sblock.fs_cgsize,
1535 (void *)&acg, fso, Nflag);
1536 DBG_PRINT0("scg written\n");
1537 DBG_DUMP_CG(&sblock,
1545 /* ************************************************************** rdfs ***** */
1547 * Here we read some block(s) from disk.
/*
 * rdfs: read `size' bytes starting at disk block `bno' (DEV_BSIZE units)
 * from descriptor `fsi' into buffer `bf'.  Any seek failure or short read
 * is fatal (err()).
 * NOTE(review): this listing is gapped (embedded line numbers jump); the
 * function prologue, local declarations and closing braces are not visible.
 */
1550 rdfs(daddr_t bno, size_t size, void *bf, int fsi)
/* Position at the requested disk block; the literal 0 is SEEK_SET. */
1557 if (lseek(fsi, (off_t)bno * DEV_BSIZE, 0) < 0) {
1558 err(33, "rdfs: seek error: %ld", (long)bno);
/* A short read is treated exactly like a read error. */
1560 n = read(fsi, bf, size);
1561 if (n != (ssize_t)size) {
1562 err(34, "rdfs: read error: %ld", (long)bno);
1569 /* ************************************************************** wtfs ***** */
1571 * Here we write some block(s) to disk.
/*
 * wtfs: write `size' bytes from `bf' to disk block `bno' (DEV_BSIZE units)
 * on descriptor `fso'.  Seek failures and short writes are fatal (err()).
 * NOTE(review): `Nflag' presumably suppresses the actual write ("no-op"
 * test mode), but the guarding code is not visible in this gapped
 * listing — confirm against the full source.
 */
1574 wtfs(daddr_t bno, size_t size, void *bf, int fso, unsigned int Nflag)
1585 if (lseek(fso, (off_t)bno * DEV_BSIZE, SEEK_SET) < 0) {
1586 err(35, "wtfs: seek error: %ld", (long)bno);
/* A short write is treated exactly like a write error. */
1588 n = write(fso, bf, size);
1589 if (n != (ssize_t)size) {
1590 err(36, "wtfs: write error: %ld", (long)bno);
1597 /* ************************************************************* alloc ***** */
1599 * Here we allocate a free block in the current cylinder group. It is assumed,
1600 * that acg contains the current cylinder group. As we may take a block from
1601 * somewhere in the filesystem we have to handle cluster summary here.
/*
 * NOTE(review): gapped listing — the return type/signature, several local
 * declarations and most braces are not visible here.
 */
1611 int dlower, dupper, dmax;
/* Sanity checks: the in-core cg must be valid and must have a free block. */
1615 if (acg.cg_magic != CG_MAGIC) {
1616 warnx("acg: bad magic number");
1620 if (acg.cg_cs.cs_nbfree == 0) {
1621 warnx("error: cylinder group ran out of space");
1626 * We start seeking for free blocks only from the space available after
1627 * the end of the new grown cylinder summary. Otherwise we allocate a
1628 * block here which we have to relocate a couple of seconds later
1629 * again, and we are not prepared to do this anyway.
/* All of dlower/dupper/dmax/csmin/csmax are cg-relative fragment numbers. */
1632 dlower=cgsblock(&sblock, acg.cg_cgx)-cgbase(&sblock, acg.cg_cgx);
1633 dupper=cgdmin(&sblock, acg.cg_cgx)-cgbase(&sblock, acg.cg_cgx);
1634 dmax=cgbase(&sblock, acg.cg_cgx)+sblock.fs_fpg;
1635 if (dmax > sblock.fs_size) {
1636 dmax = sblock.fs_size;
1638 dmax-=cgbase(&sblock, acg.cg_cgx); /* retransform into cg */
/* Range occupied by the cylinder summary; it must never be handed out. */
1639 csmin=sblock.fs_csaddr-cgbase(&sblock, acg.cg_cgx);
1640 csmax=csmin+howmany(sblock.fs_cssize, sblock.fs_fsize);
1641 DBG_PRINT3("seek range: dl=%d, du=%d, dm=%d\n",
1645 DBG_PRINT2("range cont: csmin=%d, csmax=%d\n",
/*
 * First pass: scan the area below the boot/superblock copy, skipping any
 * fragment that falls inside the cylinder summary range.
 */
1649 for(d=0; (d<dlower && blkno==-1); d+=sblock.fs_frag) {
1650 if(d>=csmin && d<=csmax) {
1653 if(isblock(&sblock, cg_blksfree(&acg), fragstoblks(&sblock,
1655 blkno = fragstoblks(&sblock, d);/* Yeah found a block */
/* Second pass: scan the regular data area of the cylinder group. */
1659 for(d=dupper; (d<dmax && blkno==-1); d+=sblock.fs_frag) {
1660 if(d>=csmin && d<=csmax) {
1663 if(isblock(&sblock, cg_blksfree(&acg), fragstoblks(&sblock,
1665 blkno = fragstoblks(&sblock, d);/* Yeah found a block */
/* cs_nbfree promised a free block above, so not finding one is a bug. */
1670 warnx("internal error: couldn't find promised block in cg");
1676 * This is needed if the block was found already in the first loop.
1678 d=blkstofrags(&sblock, blkno);
/* Mark the whole block as allocated in the fragment bitmap. */
1680 clrblock(&sblock, cg_blksfree(&acg), blkno);
1681 if (sblock.fs_contigsumsize > 0) {
1683 * Handle the cluster allocation bitmap.
1685 clrbit(cg_clustersfree(&acg), blkno);
1687 * We possibly have split a cluster here, so we have to
1688 * recalculate the sizes of the remaining cluster halves now,
1689 * and use them for updating the cluster summary information.
1691 * Lets start with the blocks before our allocated block ...
1693 for(lcs1=0, l=blkno-1; lcs1<sblock.fs_contigsumsize;
1695 if(isclr(cg_clustersfree(&acg),l)){
1700 * ... and continue with the blocks right after our allocated
1703 for(lcs2=0, l=blkno+1; lcs2<sblock.fs_contigsumsize;
1705 if(isclr(cg_clustersfree(&acg),l)){
1711 * Now update all counters.
/* The old combined cluster disappears; the two halves are re-counted. */
1713 cg_clustersum(&acg)[MIN(lcs1+lcs2+1,sblock.fs_contigsumsize)]--;
1715 cg_clustersum(&acg)[lcs1]++;
1718 cg_clustersum(&acg)[lcs2]++;
1722 * Update all statistics based on blocks.
1724 acg.cg_cs.cs_nbfree--;
1725 sblock.fs_cstotal.cs_nbfree--;
1726 cg_blktot(&acg)[cbtocylno(&sblock, d)]--;
1727 cg_blks(&sblock, &acg, cbtocylno(&sblock, d))[cbtorpos(&sblock, d)]--;
1733 /* *********************************************************** isblock ***** */
1735 * Here we check if all frags of a block are free. For more details again
1736 * please see the source of newfs(8), as this function is taken over almost
/*
 * isblock: return non-zero iff all fragments of block `h' are marked free
 * in the fragment bitmap `cp'.  The bitmap packing depends on fs_frag.
 * NOTE(review): gapped listing — the `case' labels and braces of the
 * switch are not visible here.
 */
1740 isblock(struct fs *fs, unsigned char *cp, int h)
1747 switch (fs->fs_frag) {
/* 8 frags/block: one full byte per block. */
1750 return (cp[h] == 0xff);
/* 4 frags/block: one nibble per block. */
1752 mask = 0x0f << ((h & 0x1) << 2);
1754 return ((cp[h >> 1] & mask) == mask);
/* 2 frags/block: two bits per block. */
1756 mask = 0x03 << ((h & 0x3) << 1);
1758 return ((cp[h >> 2] & mask) == mask);
/* 1 frag/block: one bit per block. */
1760 mask = 0x01 << (h & 0x7);
1762 return ((cp[h >> 3] & mask) == mask);
1764 fprintf(stderr, "isblock bad fs_frag %d\n", fs->fs_frag);
1770 /* ********************************************************** clrblock ***** */
1772 * Here we allocate a complete block in the block map. For more details again
1773 * please see the source of newfs(8), as this function is taken over almost
/*
 * clrblock: clear the free bits for all fragments of block `h' in bitmap
 * `cp', i.e. mark the whole block as allocated.
 * NOTE(review): gapped listing — the fs_frag==8 case (presumably
 * `cp[h] = 0') and the `case' labels are not visible here — confirm.
 */
1777 clrblock(struct fs *fs, unsigned char *cp, int h)
1779 DBG_FUNC("clrblock")
1783 switch ((fs)->fs_frag) {
/* 4 frags/block: clear one nibble. */
1788 cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
/* 2 frags/block: clear two bits. */
1791 cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
/* 1 frag/block: clear one bit. */
1794 cp[h >> 3] &= ~(0x01 << (h & 0x7));
1797 warnx("clrblock bad fs_frag %d", fs->fs_frag);
1805 /* ********************************************************** setblock ***** */
1807 * Here we free a complete block in the free block map. For more details again
1808 * please see the source of newfs(8), as this function is taken over almost
/*
 * setblock: set the free bits for all fragments of block `h' in bitmap
 * `cp', i.e. mark the whole block as free.  Mirror image of clrblock().
 * NOTE(review): gapped listing — the fs_frag==8 case (presumably
 * `cp[h] = 0xff') and the `case' labels are not visible here — confirm.
 */
1812 setblock(struct fs *fs, unsigned char *cp, int h)
1814 DBG_FUNC("setblock")
1818 switch (fs->fs_frag) {
/* 4 frags/block: set one nibble. */
1823 cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
/* 2 frags/block: set two bits. */
1826 cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
/* 1 frag/block: set one bit. */
1829 cp[h >> 3] |= (0x01 << (h & 0x7));
1832 warnx("setblock bad fs_frag %d", fs->fs_frag);
1840 /* ************************************************************ ginode ***** */
1842 * This function provides access to an individual inode. We find out in which
1843 * block the requested inode is located, read it from disk if needed, and
1844 * return the pointer into that block. We maintain a cache of one block to
1845 * not read the same block again and again if we iterate linearly over all
/*
 * ginode: return a pointer to inode `inumber' of cylinder group `cg'.
 * The returned pointer aims into the static block buffer `ablk' and is
 * only valid until the next call (which may refill the cache).
 * NOTE(review): gapped listing — some local declarations and braces are
 * not visible here.
 */
1848 static struct ufs1_dinode *
1849 ginode(ino_t inumber, int fsi, int cg)
1853 static ino_t startinum=0; /* first inode in cached block */
1854 struct ufs1_dinode *pi;
1858 pi=(struct ufs1_dinode *)(void *)ablk;
/* Convert the cg-relative inode number into a filesystem-wide one. */
1859 inumber+=(cg * sblock.fs_ipg);
1860 if (startinum == 0 || inumber < startinum ||
1861 inumber >= startinum + INOPB(&sblock)) {
1863 * The block needed is not cached, so we have to read it from
1866 iblk = ino_to_fsba(&sblock, inumber);
1867 in_src=fsbtodb(&sblock, iblk);
1868 rdfs(in_src, (size_t)sblock.fs_bsize, (void *)&ablk, fsi);
/* Remember the first inode contained in the freshly cached block. */
1869 startinum = (inumber / INOPB(&sblock)) * INOPB(&sblock);
1873 return (&(pi[inumber % INOPB(&sblock)]));
1876 /* ****************************************************** charsperline ***** */
1878 * Figure out how many columns our current terminal has. For more details again
1879 * please see the source of newfs(8), as this function is taken over almost
/*
 * charsperline: determine the terminal width, trying TIOCGWINSZ first,
 * then the COLUMNS environment variable, then a default of 80.
 * NOTE(review): gapped listing — the signature, the return statement and
 * the COLUMNS string-to-int conversion are not visible here.
 */
1885 DBG_FUNC("charsperline")
1893 if (ioctl(0, TIOCGWINSZ, &ws) != -1) {
1894 columns = ws.ws_col;
1896 if (columns == 0 && (cp = getenv("COLUMNS"))) {
1900 columns = 80; /* last resort */
1907 /* ************************************************************** main ***** */
1909 * growfs(8) is a utility which allows to increase the size of an existing
1910 * ufs filesystem. Currently this can only be done on an unmounted file system.
1911 * It recognizes some command line options to specify the new desired size,
1912 * and it does some basic checks. The old file system size is determined
1913 * and after some more checks like we can really access the new last block
1914 * on the disk etc. we calculate the new parameters for the superblock. After
1915 * having done this we just call growfs() which will do the work. Before
1916 * we finish the only thing left is to update the disklabel.
1917 * We still have to provide support for snapshots. Therefore we first have to
1918 * understand what data structures are always replicated in the snapshot on
1919 * creation, for all other blocks we touch during our procedure, we have to
1920 * keep the old blocks unchanged somewhere available for the snapshots. If we
1921 * are lucky, then we only have to handle our blocks to be relocated in that
1923 * Also we have to consider in what order we actually update the critical
1924 * data structures of the filesystem to make sure, that in case of a disaster
1925 * fsck(8) is still able to restore any lost data.
1926 * The foreseen last step then will be to provide for growing even mounted
1927 * file systems. There we have to extend the mount() system call to provide
1928 * userland access to the file system locking facility.
/*
 * NOTE(review): gapped listing — many declarations, option cases and
 * braces of main() are not visible below.
 */
1931 main(int argc, char **argv)
1934 struct partinfo pinfo;
1935 char *device, *special, *cp;
1937 unsigned int size=0;
1939 unsigned int Nflag=0;
1946 #endif /* FSMAXSNAP */
/* Parse command line options (-N no-op, -s size, -v, -y). */
1950 while((ch=getopt(argc, argv, "Ns:vy")) != -1) {
1956 size=(size_t)atol(optarg);
1961 case 'v': /* for compatibility to newfs */
1981 * Now try to guess the (raw)device name.
1983 if (0 == strrchr(device, '/')) {
1985 * No path prefix was given, so try in that order:
1991 * FreeBSD now doesn't distinguish between raw and block
1992 * devices any longer, but it should still work this way.
/* +2: NUL terminator plus the 'r' raw-device prefix. */
1994 len=strlen(device)+strlen(_PATH_DEV)+2+strlen("vinum/");
1995 special=(char *)malloc(len);
1996 if(special == NULL) {
1997 errx(1, "malloc failed");
/* Probe candidate paths in order until one stat()s successfully. */
1999 snprintf(special, len, "%sr%s", _PATH_DEV, device);
2000 if (stat(special, &st) == -1) {
2001 snprintf(special, len, "%s%s", _PATH_DEV, device);
2002 if (stat(special, &st) == -1) {
2003 snprintf(special, len, "%svinum/r%s",
2005 if (stat(special, &st) == -1) {
2006 /* For now this is the 'last resort' */
2007 snprintf(special, len, "%svinum/%s",
2016 * Try to access our devices for writing ...
2021 fso = open(device, O_WRONLY);
2023 err(1, "%s", device);
/* ... and reading (separate descriptors for input and output). */
2030 fsi = open(device, O_RDONLY);
2032 err(1, "%s", device);
2036 * Try to read a label and guess the slice if not specified. This
2037 * code should guess the right thing and avoid bothering the user
2038 * with the task of specifying the option -v on vinum volumes.
2040 cp=device+strlen(device)-1;
/* Fall back to fstat() if the device has no readable partition info. */
2042 if (ioctl(fsi, DIOCGPART, &pinfo) < 0) {
2043 if (fstat(fsi, &st) < 0)
2044 err(1, "unable to figure out the partition size");
2045 pinfo.media_blocks = st.st_size / DEV_BSIZE;
2046 pinfo.media_blksize = DEV_BSIZE;
2050 * Check if that partition looks suited for growing a file system.
2052 if (pinfo.media_blocks < 1) {
2053 errx(1, "partition is unavailable");
2057 * Read the current superblock, and take a backup.
2059 rdfs((daddr_t)(SBOFF/DEV_BSIZE), (size_t)SBSIZE, (void *)&(osblock),
2061 if (osblock.fs_magic != FS_MAGIC) {
2062 errx(1, "superblock not recognized");
/* Duplicate the superblock: one copy to modify, one as reference. */
2064 memcpy((void *)&fsun1, (void *)&fsun2, sizeof(fsun2));
2066 DBG_OPEN("/tmp/growfs.debug"); /* already here we need a superblock */
2067 DBG_DUMP_FS(&sblock,
2071 * Determine size to grow to. Default to the full size specified in
2074 sblock.fs_size = dbtofsb(&osblock, pinfo.media_blocks);
2076 if (size > pinfo.media_blocks){
2077 errx(1, "There is not enough space (%ju < %d)",
2078 (intmax_t)pinfo.media_blocks, size);
2080 sblock.fs_size = dbtofsb(&osblock, size);
2084 * Are we really growing ?
2086 if(osblock.fs_size >= sblock.fs_size) {
2087 errx(1, "we are not growing (%d->%d)", osblock.fs_size,
2094 * Check if we find an active snapshot.
2096 if(ExpertFlag == 0) {
2097 for(j=0; j<FSMAXSNAP; j++) {
2098 if(sblock.fs_snapinum[j]) {
2099 errx(1, "active snapshot found in filesystem\n"
2100 " please remove all snapshots before "
2103 if(!sblock.fs_snapinum[j]) { /* list is dense */
/* Interactive backup confirmation, skipped in expert or no-op mode. */
2110 if (ExpertFlag == 0 && Nflag == 0) {
2111 printf("We strongly recommend you to make a backup "
2112 "before growing the Filesystem\n\n"
2113 " Did you backup your data (Yes/No) ? ");
2114 fgets(reply, (int)sizeof(reply), stdin);
2115 if (strcmp(reply, "Yes\n")){
2116 printf("\n Nothing done \n");
2121 printf("new filesystemsize is: %d frags\n", sblock.fs_size);
2124 * Try to access our new last block in the filesystem. Even if we
2125 * later on realize we have to abort our operation, on that block
2126 * there should be no data, so we can't destroy something yet.
2128 wtfs((daddr_t)pinfo.media_blocks-1, (size_t)DEV_BSIZE, (void *)&sblock, fso,
2132 * Now calculate new superblock values and check for reasonable
2133 * bound for new file system size:
2134 * fs_size: is derived from label or user input
2135 * fs_dsize: should get updated in the routines creating or
2136 * updating the cylinder groups on the fly
2137 * fs_cstotal: should get updated in the routines creating or
2138 * updating the cylinder groups
2142 * Update the number of cylinders in the filesystem.
2144 sblock.fs_ncyl = sblock.fs_size * NSPF(&sblock) / sblock.fs_spc;
2145 if (sblock.fs_size * NSPF(&sblock) > sblock.fs_ncyl * sblock.fs_spc) {
2150 * Update the number of cylinder groups in the filesystem.
2152 sblock.fs_ncg = sblock.fs_ncyl / sblock.fs_cpg;
2153 if (sblock.fs_ncyl % sblock.fs_cpg) {
2157 if ((sblock.fs_size - (sblock.fs_ncg-1) * sblock.fs_fpg) <
2158 sblock.fs_fpg && cgdmin(&sblock, (sblock.fs_ncg-1))-
2159 cgbase(&sblock, (sblock.fs_ncg-1)) > (sblock.fs_size -
2160 (sblock.fs_ncg-1) * sblock.fs_fpg )) {
2162 * The space in the new last cylinder group is too small,
2166 #if 1 /* this is a bit more safe */
2167 sblock.fs_ncyl = sblock.fs_ncg * sblock.fs_cpg;
2169 sblock.fs_ncyl -= sblock.fs_ncyl % sblock.fs_cpg;
2171 sblock.fs_ncyl -= sblock.fs_ncyl % sblock.fs_cpg;
2172 printf( "Warning: %d sector(s) cannot be allocated.\n",
2173 (sblock.fs_size-(sblock.fs_ncg)*sblock.fs_fpg) *
2175 sblock.fs_size = sblock.fs_ncyl * sblock.fs_spc / NSPF(&sblock);
2179 * Update the space for the cylinder group summary information in the
2180 * respective cylinder group data area.
2183 fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum));
2185 if(osblock.fs_size >= sblock.fs_size) {
2186 errx(1, "not enough new space");
2189 DBG_PRINT0("sblock calculated\n");
2192 * Ok, everything prepared, so now let's do the tricks.
2194 growfs(fsi, fso, Nflag);
2197 if(fso>-1) close(fso);
2205 /* ************************************************************* usage ***** */
2207 * Dump a line of usage.
/*
 * usage: print the command synopsis to stderr.
 * NOTE(review): gapped listing — the signature and the presumably
 * following exit() are not visible here.
 */
2216 fprintf(stderr, "usage: growfs [-Ny] [-s size] special\n");
2222 /* *********************************************************** updclst ***** */
2224 * This updates most parameters and the bitmap related to clusters. We have to
2225 * assume, that sblock, osblock, acg are set up.
/*
 * updclst: mark `block' free in the cluster bitmap and fold it into the
 * cluster summary table of the current cylinder group.
 * NOTE(review): gapped listing — the signature and several braces are
 * not visible here.
 */
2235 if(sblock.fs_contigsumsize < 1) { /* no clustering */
2239 * update cluster allocation map
2241 setbit(cg_clustersfree(&acg), block);
2244 * update cluster summary table
2248 * calculate size for the trailing cluster
/* Walk backwards counting the free blocks directly preceding `block'. */
2250 for(block--; lcs<sblock.fs_contigsumsize; block--, lcs++ ) {
2251 if(isclr(cg_clustersfree(&acg), block)){
/* Move the cluster from its old size bucket into the grown one. */
2256 if(lcs < sblock.fs_contigsumsize) {
2258 cg_clustersum(&acg)[lcs]--;
2261 cg_clustersum(&acg)[lcs]++;
2268 /* *********************************************************** updrefs ***** */
2270 * This updates all references to relocated blocks for the given inode. The
2271 * inode is given as number within the cylinder group, and the number of the
2275 updrefs(int cg, ino_t in, struct gfs_bpp *bp, int fsi, int fso, unsigned int
2279 unsigned int ictr, ind2ctr, ind3ctr;
2280 ufs_daddr_t *iptr, *ind2ptr, *ind3ptr;
2281 struct ufs1_dinode *ino;
2282 int remaining_blocks;
2287 * XXX We should skip unused inodes even from being read from disk
2288 * here by using the bitmap.
2290 ino=ginode(in, fsi, cg);
2291 if(!((ino->di_mode & IFMT)==IFDIR || (ino->di_mode & IFMT)==IFREG ||
2292 (ino->di_mode & IFMT)==IFLNK)) {
2294 return; /* only check DIR, FILE, LINK */
2296 if(((ino->di_mode & IFMT)==IFLNK) && (ino->di_size<MAXSYMLINKLEN)) {
2298 return; /* skip short symlinks */
2302 return; /* skip empty file */
2304 if(!ino->di_blocks) {
2306 return; /* skip empty swiss cheesy file or old fastlink */
2308 DBG_PRINT2("scg checking inode (%d in %d)\n",
2313 * Start checking all direct blocks.
2315 remaining_blocks=howmany(ino->di_size, sblock.fs_bsize);
2316 for(ictr=0; ictr < MIN(NDADDR, (unsigned int)remaining_blocks);
2318 iptr=&(ino->di_db[ictr]);
2320 cond_bl_upd(iptr, bp, GFS_PS_INODE, fso, Nflag);
2323 DBG_PRINT0("~~scg direct blocks checked\n");
2325 remaining_blocks-=NDADDR;
2326 if(remaining_blocks<0) {
2332 * Start checking first indirect block
2334 cond_bl_upd(&(ino->di_ib[0]), bp, GFS_PS_INODE, fso, Nflag);
2335 i1_src=fsbtodb(&sblock, ino->di_ib[0]);
2336 rdfs(i1_src, (size_t)sblock.fs_bsize, (void *)&i1blk, fsi);
2337 for(ictr=0; ictr < MIN(howmany(sblock.fs_bsize,
2338 sizeof(ufs_daddr_t)), (unsigned int)remaining_blocks);
2340 iptr=&((ufs_daddr_t *)(void *)&i1blk)[ictr];
2342 cond_bl_upd(iptr, bp, GFS_PS_IND_BLK_LVL1,
2347 DBG_PRINT0("scg indirect_1 blocks checked\n");
2349 remaining_blocks-= howmany(sblock.fs_bsize, sizeof(ufs_daddr_t));
2350 if(remaining_blocks<0) {
2356 * Start checking second indirect block
2358 cond_bl_upd(&(ino->di_ib[1]), bp, GFS_PS_INODE, fso, Nflag);
2359 i2_src=fsbtodb(&sblock, ino->di_ib[1]);
2360 rdfs(i2_src, (size_t)sblock.fs_bsize, (void *)&i2blk, fsi);
2361 for(ind2ctr=0; ind2ctr < howmany(sblock.fs_bsize,
2362 sizeof(ufs_daddr_t)); ind2ctr++) {
2363 ind2ptr=&((ufs_daddr_t *)(void *)&i2blk)[ind2ctr];
2367 cond_bl_upd(ind2ptr, bp, GFS_PS_IND_BLK_LVL2, fso,
2369 i1_src=fsbtodb(&sblock, *ind2ptr);
2370 rdfs(i1_src, (size_t)sblock.fs_bsize, (void *)&i1blk,
2372 for(ictr=0; ictr<MIN(howmany((unsigned int)
2373 sblock.fs_bsize, sizeof(ufs_daddr_t)),
2374 (unsigned int)remaining_blocks); ictr++) {
2375 iptr=&((ufs_daddr_t *)(void *)&i1blk)[ictr];
2377 cond_bl_upd(iptr, bp,
2378 GFS_PS_IND_BLK_LVL1, fso, Nflag);
2383 DBG_PRINT0("scg indirect_2 blocks checked\n");
2385 #define SQUARE(a) ((a)*(a))
2386 remaining_blocks-=SQUARE(howmany(sblock.fs_bsize, sizeof(ufs_daddr_t)));
2388 if(remaining_blocks<0) {
2395 * Start checking third indirect block
2397 cond_bl_upd(&(ino->di_ib[2]), bp, GFS_PS_INODE, fso, Nflag);
2398 i3_src=fsbtodb(&sblock, ino->di_ib[2]);
2399 rdfs(i3_src, (size_t)sblock.fs_bsize, (void *)&i3blk, fsi);
2400 for(ind3ctr=0; ind3ctr < howmany(sblock.fs_bsize,
2401 sizeof(ufs_daddr_t)); ind3ctr ++) {
2402 ind3ptr=&((ufs_daddr_t *)(void *)&i3blk)[ind3ctr];
2406 cond_bl_upd(ind3ptr, bp, GFS_PS_IND_BLK_LVL3, fso,
2408 i2_src=fsbtodb(&sblock, *ind3ptr);
2409 rdfs(i2_src, (size_t)sblock.fs_bsize, (void *)&i2blk,
2411 for(ind2ctr=0; ind2ctr < howmany(sblock.fs_bsize,
2412 sizeof(ufs_daddr_t)); ind2ctr ++) {
2413 ind2ptr=&((ufs_daddr_t *)(void *)&i2blk)
2418 cond_bl_upd(ind2ptr, bp, GFS_PS_IND_BLK_LVL2,
2420 i1_src=fsbtodb(&sblock, *ind2ptr);
2421 rdfs(i1_src, (size_t)sblock.fs_bsize,
2422 (void *)&i1blk, fsi);
2423 for(ictr=0; ictr < MIN(howmany(sblock.fs_bsize,
2424 sizeof(ufs_daddr_t)),
2425 (unsigned int)remaining_blocks); ictr++) {
2426 iptr=&((ufs_daddr_t *)(void *)&i1blk)
2429 cond_bl_upd(iptr, bp,
2430 GFS_PS_IND_BLK_LVL1, fso,
2438 DBG_PRINT0("scg indirect_3 blocks checked\n");