/*
 * Copyright (c) 2004,2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
		    mount_fsid_cmp, fsid_t *);

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(dev_tree_mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount, NULL);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.  Or if he wishes to return a normal locked
 * vnode, call vx_downgrade(vp); to downgrade the VX lock to a normal
 * VN lock.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

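/*
 * Example (illustrative sketch, not compiled in): how a filesystem
 * might allocate and publish one of its vnodes.  The myfs_* names and
 * the tag value are hypothetical.
 */
#if 0
static int
myfs_get_vnode(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
	if (error)
		return (error);
	vp->v_type = VREG;	/* leaving VNON keeps scans away from it */
	vp->v_data = ip;	/* setup the remaining fields */
	vx_downgrade(vp);	/* VX lock -> normal VN lock, per above */
	*vpp = vp;
	return (0);
}
#endif
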
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}

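/*
 * Example (illustrative sketch, not compiled in): the canonical
 * busy/unbusy bracket used when touching a mount that may be racing
 * an unmount.  example_touch_mount() is hypothetical.
 */
#if 0
static int
example_touch_mount(struct mount *mp)
{
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (EBUSY);	/* unmount in progress, do not wait */
	/* mp cannot finish unmounting while our shared busy lock is held */
	kprintf("mount %s is busied\n", mp->mnt_stat.f_mntonname);
	vfs_unbusy(mp);
	return (0);
}
#endif
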
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp, vfsp->vfc_vfsops);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp, struct vfsops *ops)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_op = ops;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;

	if (ops == NULL || (ops->vfs_flags & VFSOPSF_NOSYNCERTHR) == 0)
		vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

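/*
 * Example (illustrative sketch, not compiled in): honoring the
 * hold/drop contract described above.
 */
#if 0
static int
example_lookup_fsid(fsid_t *fsid)
{
	struct mount *mp;

	mp = vfs_getvfs(fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ... use mp; it cannot be freed out from under us while held ... */
	mount_drop(mp);
	return (0);
}
#endif
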
/*
 * Generate a FSID based on the mountpt.  The FSID will be adjusted to avoid
 * collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	struct mount *mptmp;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
	mp->mnt_stat.f_fsid = *template;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  Filesystem should attempt to
 * supply a unique fsid but if a duplicate occurs adjust the fsid to ensure
 * uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}

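/*
 * Worked example of the collision rotation above (illustrative):
 * f_fsid.val[0] packs the minor device bits as 0xFFFF00FF.  Starting
 * from val[0] = 0x00010002 (minor 0x0102):
 *
 *	unpack:	((0x00010002 & 0xFFFF0000) >> 8) | (0x00010002 & 0xFF)
 *		= 0x00000100 | 0x02 = 0x0102
 *	++val:	0x0103
 *	repack:	((0x0103 << 8) & 0xFFFF0000) | (0x0103 & 0xFF)
 *		= 0x00010000 | 0x03 = 0x00010003
 *
 * i.e. each collision pass bumps the encoded minor number by one,
 * spilling into val[1] only after the 0x01000000 retry limit.
 */
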
/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

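/*
 * Example (illustrative sketch, not compiled in): a short-lived callback
 * run under mountlist_token via mountlist_interlock().
 * example_is_first() is hypothetical.
 */
#if 0
static int
example_is_first(struct mount *mp)
{
	/* runs with mountlist_token held; must not block for long */
	return (TAILQ_FIRST(&mountlist) == mp ? 0 : EINVAL);
}

/* usage:  error = mountlist_interlock(example_is_first, mp); */
#endif
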
/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return (TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee that the mp passed to this function
 *	  will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (node_exists);
}

/*
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 * MNTSCAN_NOUNLOCK	- Do not unlock mountlist_token across callback
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;
	int dounlock = ((how & MNTSCAN_NOUNLOCK) == 0);

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return (res);
}

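/*
 * Example (illustrative sketch, not compiled in): counting read-only
 * mounts with a forward scan.  The callback runs with the mount busied
 * (MNTSCAN_NOBUSY not given) and without mountlist_token held.
 */
#if 0
static int
example_count_ro(struct mount *mp, void *data)
{
	/* >= 0 return values are aggregated into the scan total */
	return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

static int
example_count_ro_mounts(void)
{
	return (mountlist_scan(example_count_ro, NULL, MNTSCAN_FORWARD));
}
#endif
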
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(struct mount *mp, int flags,
	      int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
	      int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
	      void *data)
{
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch (flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;

			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup.  Release the vnode per the lock mode
			 * obtained above.
			 */
			switch (flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptible, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return (r);
}

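/*
 * Example (illustrative sketch, not compiled in): visiting every
 * regular-file vnode on a mount.  The fast function filters without
 * blocking; the slow function runs with the vnode VX locked.
 */
#if 0
static int
example_fast(struct mount *mp, struct vnode *vp, void *data)
{
	if (vp->v_type != VREG)
		return (-1);	/* skip, do not call the slow function */
	return (0);		/* continue on to the slow function */
}

static int
example_slow(struct mount *mp, struct vnode *vp, void *data)
{
	int *counter = data;

	++*counter;		/* vp is VX locked here; may block */
	return (0);		/* non-zero would terminate the scan */
}

/*
 * usage:
 *	int count = 0;
 *	vmntvnodescan(mp, VMSC_GETVX, example_fast, example_slow, &count);
 */
#endif
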
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;	/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	struct thread *td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

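/*
 * Example (illustrative sketch, not compiled in): a typical VFS_UNMOUNT
 * implementation flushes all vnodes, forcing the issue only for forced
 * unmounts.  myfs_unmount() is hypothetical.
 */
#if 0
static int
myfs_unmount(struct mount *mp, int mntflags)
{
	int flags = 0;
	int error;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	error = vflush(mp, 1, flags);	/* one ref expected on the root vnode */
	if (error)
		return (error);
	/* ... tear down filesystem-private state ... */
	return (0);
}
#endif
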
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over any vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return (0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return (0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return (0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return (0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return (0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}