From: Matthew Dillon
Date: Sun, 16 Oct 2016 21:13:39 +0000 (-0700)
Subject: kernel - Rename desiredvnodes to maxvnodes, fix deadlock
X-Git-Tag: v4.8.0rc~833
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/9629eb3587b2cd563eccbd1eab0984b652f5dad5

kernel - Rename desiredvnodes to maxvnodes, fix deadlock

* Rename the kernel variable 'desiredvnodes' to 'maxvnodes' to match the
  sysctl name (which has always been 'maxvnodes') and to make the code
  more readable.

* Probable fix for a rare mount/umount deadlock which can occur in two
  situations: (1) when a large number of mounts and unmounts are running
  concurrently, and (2) during a 'umount -a', shutdown, or reboot.

* Considered minor; normal use cases will not reproduce this bug.  Only
  synth or poudriere generates enough mount/umount traffic to reproduce
  it.

* Also fixes a minor kernel memory leak of the mount structure which can
  occur when a 'df' or filesystem sync races a umount.

(Illustrative sketches of the new mount hold/drop pattern and of the
umountall callback convention are appended after the diff.)

Reported-by: marino (mount race)
---

diff --git a/sys/gnu/vfs/ext2fs/ext2_ihash.c b/sys/gnu/vfs/ext2fs/ext2_ihash.c
index 3ca4c77168..345c11d6a4 100644
--- a/sys/gnu/vfs/ext2fs/ext2_ihash.c
+++ b/sys/gnu/vfs/ext2fs/ext2_ihash.c
@@ -60,7 +60,7 @@ void
 ext2_ihashinit(void)
 {
     ext2_ihash = 16;
-    while (ext2_ihash < desiredvnodes)
+    while (ext2_ihash < maxvnodes)
         ext2_ihash <<= 1;
     ext2_ihashtbl = kmalloc(sizeof(void *) * ext2_ihash, M_EXT2IHASH, M_WAITOK|M_ZERO);
     --ext2_ihash;
diff --git a/sys/gnu/vfs/ext2fs/ext2_quota.c b/sys/gnu/vfs/ext2fs/ext2_quota.c
index 8c5deb3f47..5a8c100e08 100644
--- a/sys/gnu/vfs/ext2fs/ext2_quota.c
+++ b/sys/gnu/vfs/ext2fs/ext2_quota.c
@@ -695,7 +695,7 @@ static long ext2_numdquot, ext2_desireddquot = DQUOTINC;
 void
 ext2_dqinit(void)
 {
-    ext2_dqhashtbl = hashinit(desiredvnodes, M_EXT2DQUOT, &ext2_dqhash);
+    ext2_dqhashtbl = hashinit(maxvnodes, M_EXT2DQUOT, &ext2_dqhash);
     TAILQ_INIT(&ext2_dqfreelist);
 }
 
@@ -740,8 +740,10 @@ ext2_dqget(struct vnode *vp, u_long id, struct ext2_mount *ump, int type,
     /*
      * Not in cache, allocate a new one.
      */
-    if (TAILQ_EMPTY(&ext2_dqfreelist) && ext2_numdquot < MAXQUOTAS * desiredvnodes)
+    if (TAILQ_EMPTY(&ext2_dqfreelist) &&
+        ext2_numdquot < MAXQUOTAS * maxvnodes) {
         ext2_desireddquot += DQUOTINC;
+    }
     if (ext2_numdquot < ext2_desireddquot) {
         dq = (struct ext2_dquot *)kmalloc(sizeof *dq, M_EXT2DQUOT, M_WAITOK | M_ZERO);
         ext2_numdquot++;
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 50b39b157d..35adafacb8 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -2624,7 +2624,7 @@ void
 cache_hysteresis(int critpath)
 {
     int poslimit;
-    int neglimit = desiredvnodes / ncnegfactor;
+    int neglimit = maxvnodes / ncnegfactor;
     int xnumcache = numcache;
 
     if (critpath == 0)
@@ -2669,7 +2669,7 @@ cache_hysteresis(int critpath)
      * into infinity).
      */
     if ((poslimit = ncposlimit) == 0)
-        poslimit = desiredvnodes * 2;
+        poslimit = maxvnodes * 2;
     if (critpath == 0)
         poslimit = poslimit * 8 / 10;
 
@@ -3698,7 +3698,7 @@ nchinit(void)
     }
     TAILQ_INIT(&ncneglist);
     spin_init(&ncspin, "nchinit");
-    nchashtbl = hashinit_ext(desiredvnodes / 2,
+    nchashtbl = hashinit_ext(maxvnodes / 2,
                              sizeof(struct nchash_head),
                              M_VFSCACHE, &nchash);
     for (i = 0; i <= (int)nchash; ++i) {
diff --git a/sys/kern/vfs_lock.c b/sys/kern/vfs_lock.c
index c44c0d2850..6d915b60f6 100644
--- a/sys/kern/vfs_lock.c
+++ b/sys/kern/vfs_lock.c
@@ -852,8 +852,8 @@ allocvnode(int lktimeout, int lkflags)
      * process to handle the cleaning (at 9/10's) before we are forced
      * to flag it here at 11/10's for userexit path processing.
      */
-    if (numvnodes >= desiredvnodes * 11 / 10 &&
-        cachedvnodes + inactivevnodes >= desiredvnodes * 5 / 10) {
+    if (numvnodes >= maxvnodes * 11 / 10 &&
+        cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
         struct thread *td = curthread;
         if (td->td_lwp)
             atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
@@ -917,8 +917,8 @@ allocvnode(int lktimeout, int lkflags)
 void
 allocvnode_gc(void)
 {
-    if (numvnodes >= desiredvnodes &&
-        cachedvnodes + inactivevnodes >= desiredvnodes * 5 / 10) {
+    if (numvnodes >= maxvnodes &&
+        cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
         freesomevnodes(batchfreevnodes);
     }
 }
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index f2092270bb..4fb583925f 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -333,10 +333,24 @@ mount_init(struct mount *mp)
     TAILQ_INIT(&mp->mnt_jlist);
     mp->mnt_nvnodelistsize = 0;
     mp->mnt_flag = 0;
+    mp->mnt_hold = 1;
     mp->mnt_iosize_max = MAXPHYS;
     vn_syncer_thr_create(mp);
 }
 
+void
+mount_hold(struct mount *mp)
+{
+    atomic_add_int(&mp->mnt_hold, 1);
+}
+
+void
+mount_drop(struct mount *mp)
+{
+    if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1)
+        kfree(mp, M_MOUNT);
+}
+
 /*
  * Lookup a mount point by filesystem identifier.
  */
@@ -452,9 +466,9 @@ vnlru_proc(void)
      *
      * (long) -> deal with 64 bit machines, intermediate overflow
      */
-    if (numvnodes >= desiredvnodes * 9 / 10 &&
-        cachedvnodes + inactivevnodes >= desiredvnodes * 5 / 10) {
-        int count = numvnodes - desiredvnodes * 9 / 10;
+    if (numvnodes >= maxvnodes * 9 / 10 &&
+        cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
+        int count = numvnodes - maxvnodes * 9 / 10;
 
         if (count > (cachedvnodes + inactivevnodes) / 100)
             count = (cachedvnodes + inactivevnodes) / 100;
@@ -474,8 +488,8 @@ vnlru_proc(void)
          * Nothing to do if most of our vnodes are already on
          * the free list.
          */
-        if (numvnodes <= desiredvnodes * 9 / 10 ||
-            cachedvnodes + inactivevnodes <= desiredvnodes * 5 / 10) {
+        if (numvnodes <= maxvnodes * 9 / 10 ||
+            cachedvnodes + inactivevnodes <= maxvnodes * 5 / 10) {
             tsleep(vnlruthread, 0, "vlruwt", hz);
             continue;
         }
@@ -605,6 +619,11 @@ mountlist_exists(struct mount *mp)
  * MNTSCAN_REVERSE - the mountlist is scanned in reverse
  * MNTSCAN_NOBUSY  - the scanner will make the callback without busying
  *                   the mount node.
+ *
+ * NOTE: mount_hold()/mount_drop() sequence primarily helps us avoid
+ *       confusion for the unbusy check, particularly if a kfree/kmalloc
+ *       occurs quickly (lots of processes mounting and unmounting at the
+ *       same time).
 */
 int
 mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
@@ -625,6 +644,7 @@ mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
     if (how & MNTSCAN_FORWARD) {
         info.msi_node = TAILQ_FIRST(&mountlist);
         while ((mp = info.msi_node) != NULL) {
+            mount_hold(mp);
             if (how & MNTSCAN_NOBUSY) {
                 count = callback(mp, data);
             } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
@@ -634,6 +654,7 @@ mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
             } else {
                 count = 0;
             }
+            mount_drop(mp);
             if (count < 0)
                 break;
             res += count;
@@ -643,6 +664,7 @@ mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
     } else if (how & MNTSCAN_REVERSE) {
         info.msi_node = TAILQ_LAST(&mountlist, mntlist);
         while ((mp = info.msi_node) != NULL) {
+            mount_hold(mp);
             if (how & MNTSCAN_NOBUSY) {
                 count = callback(mp, data);
             } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
@@ -652,6 +674,7 @@ mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
             } else {
                 count = 0;
             }
+            mount_drop(mp);
             if (count < 0)
                 break;
             res += count;
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 584393cf89..ae2814fced 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -116,9 +116,9 @@ int nfs_mount_type = -1;
 static struct lwkt_token spechash_token;
 struct nfs_public nfs_pub;          /* publicly exported FS */
 
-int desiredvnodes;
+int maxvnodes;
 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
-           &desiredvnodes, 0, "Maximum number of vnodes");
+           &maxvnodes, 0, "Maximum number of vnodes");
 
 static struct radix_node_head *vfs_create_addrlist_af(int af,
                                         struct netexport *nep);
@@ -171,10 +171,9 @@ vfs_subr_init(void)
      */
     factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
     factor2 = 25 * (sizeof(struct vm_object) + sizeof(struct vnode));
-    desiredvnodes =
-        imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
-             KvaSize / factor2);
-    desiredvnodes = imax(desiredvnodes, maxproc * 8);
+    maxvnodes = imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
+                     KvaSize / factor2);
+    maxvnodes = imax(maxvnodes, maxproc * 8);
 
     lwkt_token_init(&spechash_token, "spechash");
 }
@@ -1832,8 +1831,14 @@ vfs_mountedon(struct vnode *vp)
 /*
  * Unmount all filesystems. The list is traversed in reverse order
  * of mounting to avoid dependencies.
+ *
+ * We want the umountall to be able to break out of its loop if a
+ * failure occurs, after scanning all possible mounts, so the callback
+ * returns 0 on error.
+ *
+ * NOTE: Do not call mountlist_remove(mp) on error any more, this will
+ *       confuse mountlist_scan()'s unbusy check.
 */
-
 static int vfs_umountall_callback(struct mount *mp, void *data);
 
 void
@@ -1855,15 +1860,16 @@ vfs_umountall_callback(struct mount *mp, void *data)
 
     error = dounmount(mp, MNT_FORCE);
     if (error) {
-        mountlist_remove(mp);
         kprintf("unmount of filesystem mounted from %s failed (",
             mp->mnt_stat.f_mntfromname);
         if (error == EBUSY)
             kprintf("BUSY)\n");
         else
             kprintf("%d)\n", error);
+        return 0;
+    } else {
+        return 1;
     }
-    return(1);
 }
 
 /*
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 41374f8b03..5062ed7b5a 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -768,12 +768,12 @@ dounmount(struct mount *mp, int flags)
         if ((flags & MNT_FORCE) == 0) {
             mount_warning(mp, "Cannot unmount: "
                               "%d mount refs still present",
-                              mp->mnt_refs);
+                              mp->mnt_refs - 1);
             error = EBUSY;
         } else {
             mount_warning(mp, "Forced unmount: "
                               "%d mount refs still present",
-                              mp->mnt_refs);
+                              mp->mnt_refs - 1);
             freeok = 0;
         }
     }
@@ -865,7 +865,7 @@ dounmount(struct mount *mp, int flags)
             tsleep(&mp->mnt_refs, 0, "umntrwait", hz / 10 + 1);
         }
         lwkt_reltoken(&mp->mnt_token);
-        kfree(mp, M_MOUNT);
+        mount_drop(mp);
         mp = NULL;
     }
     error = 0;
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index 102fe7ec7b..1bab65c4ff 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -241,6 +241,7 @@ struct mount {
     struct nchandle mnt_ncmountpt;      /* mount point */
     struct nchandle mnt_ncmounton;      /* mounted on */
     int mnt_refs;                       /* nchandle references */
+    int mnt_hold;                       /* prevent kfree */
     struct lwkt_token mnt_token;        /* token lock if not MPSAFE */
     struct journallst mnt_jlist;        /* list of active journals */
     u_int8_t *mnt_jbitmap;              /* streamid bitmap */
@@ -706,6 +707,8 @@ extern char *mountrootfsname;
 /*
  * exported vnode operations
  */
+void mount_hold(struct mount *);
+void mount_drop(struct mount *);
 int dounmount (struct mount *, int);
 int vfs_setpublicfs     /* set publicly exported fs */
     (struct mount *, struct netexport *, const struct export_args *);
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 1c6a60f990..b808f93bfe 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -344,7 +344,7 @@ struct objcache;
 
 extern struct vnode *rootvnode;     /* root (i.e. "/") vnode */
 extern struct nchandle rootnch;     /* root (i.e. "/") namecache */
-extern int desiredvnodes;           /* number of vnodes desired */
+extern int maxvnodes;               /* nominal maximum number of vnodes */
 extern time_t syncdelay;            /* max time to delay syncing data */
 extern time_t filedelay;            /* time to delay syncing files */
 extern time_t dirdelay;             /* time to delay syncing directories */
diff --git a/sys/vfs/hammer/hammer_vfsops.c b/sys/vfs/hammer/hammer_vfsops.c
index 016771a18a..212424c610 100644
--- a/sys/vfs/hammer/hammer_vfsops.c
+++ b/sys/vfs/hammer/hammer_vfsops.c
@@ -351,7 +351,7 @@ hammer_vfs_init(struct vfsconf *conf)
      * memory pool blowout.
      */
     if (hammer_limit_reclaims == 0)
-        hammer_limit_reclaims = desiredvnodes / 10;
+        hammer_limit_reclaims = maxvnodes / 10;
 
     return(0);
 }
diff --git a/sys/vfs/hammer2/hammer2_vfsops.c b/sys/vfs/hammer2/hammer2_vfsops.c
index 165a87d6a1..f07b275278 100644
--- a/sys/vfs/hammer2/hammer2_vfsops.c
+++ b/sys/vfs/hammer2/hammer2_vfsops.c
@@ -288,7 +288,7 @@ hammer2_vfs_init(struct vfsconf *conf)
     TAILQ_INIT(&hammer2_mntlist);
     TAILQ_INIT(&hammer2_pfslist);
 
-    hammer2_limit_dirty_chains = desiredvnodes / 10;
+    hammer2_limit_dirty_chains = maxvnodes / 10;
 
     return (error);
 }
diff --git a/sys/vfs/hpfs/hpfs_hash.c b/sys/vfs/hpfs/hpfs_hash.c
index d441c23860..b1f3794394 100644
--- a/sys/vfs/hpfs/hpfs_hash.c
+++ b/sys/vfs/hpfs/hpfs_hash.c
@@ -61,8 +61,8 @@ void
 hpfs_hphashinit(void)
 {
-    lockinit (&hpfs_hphash_lock, "hpfs_hphashlock", 0, 0);
-    hpfs_hphashtbl = hashinit(desiredvnodes, M_HPFSHASH, &hpfs_hphash);
+    lockinit(&hpfs_hphash_lock, "hpfs_hphashlock", 0, 0);
+    hpfs_hphashtbl = hashinit(maxvnodes, M_HPFSHASH, &hpfs_hphash);
     lwkt_token_init(&hpfs_hphash_token, "hpfsihash");
 }
 
diff --git a/sys/vfs/isofs/cd9660/cd9660_node.c b/sys/vfs/isofs/cd9660/cd9660_node.c
index 375d4c4020..446112f5f4 100644
--- a/sys/vfs/isofs/cd9660/cd9660_node.c
+++ b/sys/vfs/isofs/cd9660/cd9660_node.c
@@ -73,7 +73,7 @@ cd9660_init(struct vfsconf *vfsp)
 {
     int hlimit;
 
-    if ((hlimit = desiredvnodes) < CD9660_HASH_SIZE_LIMIT)
+    if ((hlimit = maxvnodes) < CD9660_HASH_SIZE_LIMIT)
         hlimit = CD9660_HASH_SIZE_LIMIT;
 
     isohash = 16;
diff --git a/sys/vfs/msdosfs/msdosfs_denode.c b/sys/vfs/msdosfs/msdosfs_denode.c
index e14f606e7b..91594106e6 100644
--- a/sys/vfs/msdosfs/msdosfs_denode.c
+++ b/sys/vfs/msdosfs/msdosfs_denode.c
@@ -118,10 +118,11 @@ int
 msdosfs_init(struct vfsconf *vfsp)
 {
     dehash = 16;
-    while (dehash < desiredvnodes)
+    while (dehash < maxvnodes)
         dehash <<= 1;
-    dehashtbl = kmalloc(sizeof(void *) * dehash, M_MSDOSFSMNT,
-                        M_WAITOK|M_ZERO);
+    dehashtbl = kmalloc(sizeof(void *) * dehash,
+                        M_MSDOSFSMNT,
+                        M_WAITOK|M_ZERO);
     --dehash;
     lwkt_token_init(&dehash_token, "msdosihash");
     return (0);
diff --git a/sys/vfs/nfs/nfs_node.c b/sys/vfs/nfs/nfs_node.c
index e22a39c02c..f1b0073dbf 100644
--- a/sys/vfs/nfs/nfs_node.c
+++ b/sys/vfs/nfs/nfs_node.c
@@ -72,7 +72,7 @@ void
 nfs_nhinit(void)
 {
     nfsnode_objcache = objcache_create_simple(M_NFSNODE, sizeof(struct nfsnode));
-    nfsnodehashtbl = hashinit(desiredvnodes, M_NFSHASH, &nfsnodehash);
+    nfsnodehashtbl = hashinit(maxvnodes, M_NFSHASH, &nfsnodehash);
     lockinit(&nfsnhash_lock, "nfsnht", 0, 0);
 }
 
diff --git a/sys/vfs/ntfs/ntfs_ihash.c b/sys/vfs/ntfs/ntfs_ihash.c
index 5214dceb4f..2e1ff3673d 100644
--- a/sys/vfs/ntfs/ntfs_ihash.c
+++ b/sys/vfs/ntfs/ntfs_ihash.c
@@ -63,7 +63,7 @@ void
 ntfs_nthashinit(void)
 {
     lockinit(&ntfs_hashlock, "ntfs_nthashlock", 0, 0);
-    ntfs_nthashtbl = hashinit(desiredvnodes, M_NTFSNTHASH, &ntfs_nthash);
+    ntfs_nthashtbl = hashinit(maxvnodes, M_NTFSNTHASH, &ntfs_nthash);
     lwkt_token_init(&ntfs_nthash_slock, "ntfsihash");
 }
 
diff --git a/sys/vfs/smbfs/smbfs_vfsops.c b/sys/vfs/smbfs/smbfs_vfsops.c
index f4279200f8..f238b85c6a 100644
--- a/sys/vfs/smbfs/smbfs_vfsops.c
+++ b/sys/vfs/smbfs/smbfs_vfsops.c
@@ -144,7 +144,7 @@ smbfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
     smp = kmalloc(sizeof(*smp), M_SMBFSDATA, M_WAITOK | M_USE_RESERVE | M_ZERO);
     mp->mnt_data = (qaddr_t)smp;
     smp->sm_cred = crhold(cred);
-    smp->sm_hash = hashinit(desiredvnodes, M_SMBFSHASH, &smp->sm_hashlen);
+    smp->sm_hash = hashinit(maxvnodes, M_SMBFSHASH, &smp->sm_hashlen);
     if (smp->sm_hash == NULL)
         goto bad;
     lockinit(&smp->sm_hashlock, "smbfsh", 0, 0);
diff --git a/sys/vfs/ufs/ffs_softdep.c b/sys/vfs/ufs/ffs_softdep.c
index 971495bb42..e459c8a69a 100644
--- a/sys/vfs/ufs/ffs_softdep.c
+++ b/sys/vfs/ufs/ffs_softdep.c
@@ -1044,13 +1044,12 @@ softdep_initialize(void)
 {
     LIST_INIT(&mkdirlisthd);
     LIST_INIT(&softdep_workitem_pending);
-    max_softdeps = min(desiredvnodes * 8,
+    max_softdeps = min(maxvnodes * 8,
         M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
-    pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
-        &pagedep_hash);
+    pagedep_hashtbl = hashinit(maxvnodes / 5, M_PAGEDEP, &pagedep_hash);
     lockinit(&lk, "ffs_softdep", 0, LK_CANRECURSE);
     sema_init(&pagedep_in_progress, "pagedep", 0);
-    inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
+    inodedep_hashtbl = hashinit(maxvnodes, M_INODEDEP, &inodedep_hash);
     sema_init(&inodedep_in_progress, "inodedep", 0);
     newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
     sema_init(&newblk_in_progress, "newblk", 0);
diff --git a/sys/vfs/ufs/ufs_ihash.c b/sys/vfs/ufs/ufs_ihash.c
index 2558cc0831..5e4e144e6d 100644
--- a/sys/vfs/ufs/ufs_ihash.c
+++ b/sys/vfs/ufs/ufs_ihash.c
@@ -55,12 +55,13 @@ static MALLOC_DEFINE(M_UFSIHASH, "UFS ihash", "UFS Inode hash tables");
 void
 ufs_ihashinit(struct ufsmount *ump)
 {
-    u_long target = desiredvnodes / 4 + 1;
+    u_long target = maxvnodes / 4 + 1;
 
     ump->um_ihash = 16;
     while (ump->um_ihash < target)
         ump->um_ihash <<= 1;
-    ump->um_ihashtbl = kmalloc(sizeof(void *) * ump->um_ihash, M_UFSIHASH,
+    ump->um_ihashtbl = kmalloc(sizeof(void *) * ump->um_ihash,
+                               M_UFSIHASH,
                                M_WAITOK|M_ZERO);
     --ump->um_ihash;
 }
diff --git a/sys/vfs/ufs/ufs_quota.c b/sys/vfs/ufs/ufs_quota.c
index fa3627ac03..2d0a0bdfdd 100644
--- a/sys/vfs/ufs/ufs_quota.c
+++ b/sys/vfs/ufs/ufs_quota.c
@@ -736,7 +736,7 @@ static long ufs_numdquot, ufs_desireddquot = DQUOTINC;
 void
 ufs_dqinit(void)
 {
-    ufs_dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &ufs_dqhash);
+    ufs_dqhashtbl = hashinit(maxvnodes, M_DQUOT, &ufs_dqhash);
     TAILQ_INIT(&ufs_dqfreelist);
 }
 
@@ -778,11 +778,14 @@ ufs_dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
         *dqp = dq;
         return (0);
     }
+
     /*
      * Not in cache, allocate a new one.
      */
-    if (TAILQ_EMPTY(&ufs_dqfreelist) && ufs_numdquot < MAXQUOTAS * desiredvnodes)
+    if (TAILQ_EMPTY(&ufs_dqfreelist) &&
+        ufs_numdquot < MAXQUOTAS * maxvnodes) {
         ufs_desireddquot += DQUOTINC;
+    }
     if (ufs_numdquot < ufs_desireddquot) {
         dq = (struct ufs_dquot *) kmalloc(sizeof *dq, M_DQUOT,
                                           M_WAITOK | M_ZERO);
diff --git a/usr.bin/systat/systat.1 b/usr.bin/systat/systat.1
index e823407abe..2129fd2c1e 100644
--- a/usr.bin/systat/systat.1
+++ b/usr.bin/systat/systat.1
@@ -431,7 +431,7 @@ per second over the refresh interval.
 At the bottom of this column are lines showing the amount of memory,
 in kilobytes, used for the buffer cache (`buf'),
 the number of dirty buffers in the buffer cache (`dirtybuf'),
-desired maximum size of vnode cache (`desiredvnodes')
+desired maximum size of vnode cache (`maxvnodes')
 (mostly unused, except to size the name cache),
 number of vnodes actually allocated (`numvnodes'),
 and
diff --git a/usr.bin/systat/vmstat.c b/usr.bin/systat/vmstat.c
index bf3eda51ed..2a795c90dd 100644
--- a/usr.bin/systat/vmstat.c
+++ b/usr.bin/systat/vmstat.c
@@ -73,7 +73,7 @@ static struct Info {
     long nchpathcount;
     long *intrcnt;
     long bufspace;
-    int desiredvnodes;
+    int maxvnodes;
     int cachedvnodes;
     int inactivevnodes;
     int activevnodes;
@@ -145,7 +145,7 @@ static struct nlist namelist[] = {
 #define X_NCHSTATS      1
     { .n_name = "_nchstats" },
 #define X_DESIREDVNODES 2
-    { .n_name = "_desiredvnodes" },
+    { .n_name = "_maxvnodes" },
 #define X_CACHEDVNODES  3
     { .n_name = "_cachedvnodes" },
 #define X_INACTIVEVNODES 4
@@ -903,7 +903,7 @@ getinfo(struct Info *ls)
     if (kinfo_get_sched_cputime(&cp_time))
         err(1, "kinfo_get_sched_cputime");
     NREAD(X_BUFFERSPACE, &ls->bufspace, sizeof(ls->bufspace));
-    NREAD(X_DESIREDVNODES, &ls->desiredvnodes, sizeof(ls->desiredvnodes));
+    NREAD(X_DESIREDVNODES, &ls->maxvnodes, sizeof(ls->maxvnodes));
     NREAD(X_CACHEDVNODES, &ls->cachedvnodes, sizeof(ls->cachedvnodes));
     NREAD(X_INACTIVEVNODES, &ls->inactivevnodes,
           sizeof(ls->inactivevnodes));
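
The following is a minimal userspace sketch of the hold/drop lifetime
pattern this commit adds around struct mount.  The type and function
names (mount_model, mount_hold_model, scan_one, and so on) are invented
stand-ins for illustration, not the kernel's struct mount,
mount_hold()/mount_drop(), or mountlist_scan(); only the refcount
discipline is modeled: the mount starts with one hold owned by the
mount itself, a scanner takes a transient hold across its callback, and
whoever releases the last hold frees the structure.

/*
 * Userspace model of the hold/drop refcount discipline (illustrative
 * names; simplified single-mount "scan").
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mount_model {
    atomic_int  hold;       /* prevents premature free */
    const char  *from;      /* stand-in for mnt_stat.f_mntfromname */
};

static struct mount_model *
mount_create(const char *from)
{
    struct mount_model *mp = calloc(1, sizeof(*mp));

    atomic_store(&mp->hold, 1);     /* like mount_init(): mnt_hold = 1 */
    mp->from = from;
    return mp;
}

static void
mount_hold_model(struct mount_model *mp)
{
    atomic_fetch_add(&mp->hold, 1);
}

static void
mount_drop_model(struct mount_model *mp)
{
    /* last hold frees, mirroring mount_drop()'s kfree(mp, M_MOUNT) */
    if (atomic_fetch_sub(&mp->hold, 1) == 1) {
        printf("freeing mount from %s\n", mp->from);
        free(mp);
    }
}

/*
 * A scanner holds the mount across its callback so a concurrent
 * unmount dropping its own reference cannot free the structure
 * underneath the scan.
 */
static int
scan_one(struct mount_model *mp, int (*callback)(struct mount_model *))
{
    int count;

    mount_hold_model(mp);
    count = callback(mp);
    mount_drop_model(mp);
    return count;
}

static int
report(struct mount_model *mp)
{
    printf("visiting mount from %s\n", mp->from);
    return 1;
}

int
main(void)
{
    struct mount_model *mp = mount_create("/dev/example");

    scan_one(mp, report);
    mount_drop_model(mp);   /* the unmount path drops the initial hold */
    return 0;
}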
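
Similarly, a small sketch of the scan/callback return convention that
the reworked vfs_umountall_callback() relies on, as visible in the
mountlist_scan() hunks above: a negative callback return aborts the
scan, any other return is accumulated, so a callback that reports 0 for
a failed unmount (instead of removing the mount from the list) lets the
pass continue and lets the caller judge progress from the total.  The
loop, fixed array size, and names below are illustrative stand-ins, not
the kernel code.

#include <stdio.h>

#define NMOUNTS 3

/*
 * Simplified stand-in for mountlist_scan()'s accumulation rule:
 * negative return aborts, otherwise results are summed.
 */
static int
scan_all(int (*callback)(int idx, void *data), void *data)
{
    int res = 0;
    int count;
    int idx;

    for (idx = 0; idx < NMOUNTS; ++idx) {
        count = callback(idx, data);
        if (count < 0)
            break;
        res += count;
    }
    return res;
}

/*
 * Like the reworked vfs_umountall_callback(): return 1 for a
 * successful unmount and 0 on failure, so one busy filesystem no
 * longer stops the pass.
 */
static int
umount_cb(int idx, void *data)
{
    int busy = (idx == 1);  /* pretend the second mount is busy */

    (void)data;
    if (busy) {
        printf("unmount of filesystem %d failed (BUSY)\n", idx);
        return 0;
    }
    printf("unmounted filesystem %d\n", idx);
    return 1;
}

int
main(void)
{
    printf("successful unmounts: %d\n", scan_all(umount_cb, NULL));
    return 0;
}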