/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.7 2005/02/09 02:51:04 dillon Exp $
 */
/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/thread2.h>

#include <vm/vm_object.h>
static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");
static struct lwkt_token mntid_token;

struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
struct lwkt_token mountlist_token;
struct lwkt_token mntvnode_token;
/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
}
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
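/*
 * Usage sketch (illustrative only; VT_UFS, "ip" and the VREG type are
 * placeholder assumptions, not taken from this file): a filesystem's
 * inode-to-vnode path sets up the remaining fields and then drops the
 * VX lock while keeping the reference:
 *
 *	struct vnode *vp;
 *
 *	if (getnewvnode(VT_UFS, mp, &vp, 0, 0) == 0) {
 *		vp->v_data = ip;	// fs-private data (placeholder)
 *		vp->v_type = VREG;	// clears the VNON "not ready" state
 *		vx_unlock(vp);		// keep the vref, per the comment above
 *	}
 */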
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops_pp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_ops = ops_pp;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);
	vfs_object_create(vp, curthread);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  The interlock is not released on failure.
 */
int
vfs_busy(struct mount *mp, int flags,
	lwkt_tokref_t interlkp, struct thread *td)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * note: interlkp is a serializer and thus can be safely
		 * held through any sleep
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}
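/*
 * Usage sketch (illustrative): mountlist walkers pass the mountlist
 * token as the interlock; lockmgr() releases it while the busy
 * reference is acquired, so it must be retaken to continue the walk:
 *
 *	lwkt_tokref ilock;
 *
 *	lwkt_gettoken(&ilock, &mountlist_token);
 *	if (vfs_busy(mp, LK_NOWAIT, &ilock, curthread) == 0) {
 *		// ... use mp; lockmgr() released the interlock ...
 *		lwkt_gettokref(&ilock);		// retake for list traversal
 *		vfs_unbusy(mp, curthread);
 *	}
 *
 * This mirrors the loop in vnlru_proc() below.
 */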
/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp, struct thread *td)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct thread *td = curthread;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	}
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE);
	vfs_busy(mp, LK_NOWAIT, NULL, td);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}
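/*
 * Usage sketch (illustrative; the "ufs" and "rootdev" names are
 * assumptions): the root mount path looks up the filesystem type and
 * gets back a busied, read-only mount structure that it then hands to
 * the fs-specific mount code before unbusying:
 *
 *	struct mount *mp;
 *
 *	if (vfs_rootmountalloc("ufs", "rootdev", &mp) == 0) {
 *		// ... VFS-specific mount of the root device ...
 *		vfs_unbusy(mp, curthread);
 *	}
 */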
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}
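/*
 * Worked example of the encoding above (illustrative numbers, assuming
 * makeudev(major, minor) simply packs the two fields): with
 * vfc_typenum == 5 and mntid_base == 0x1234, val[1] becomes 5 and the
 * minor passed to makeudev() is
 *
 *	0x05000000 | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *	    == 0x05000000 | 0x00120000 | 0x34 == 0x05120034
 *
 * The low 16 bits of the minor track only the low byte of mntid_base,
 * which is why uniqueness mod 2^16 only holds for the first 2^8 calls.
 */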
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
/*
 * Return 0 if the vnode cannot be recycled (it is already on the free
 * list, is in use, or caches too many pages); return 1 if the vnode,
 * with some additional work, could possibly be placed on the free list.
 * We try to avoid recycling vnodes with lots of cached pages.  The cache
 * trigger level is calculated dynamically.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VFREE)
		return (0);
	if (vp->v_usecount != 0)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	return (1);
}
/*
 * The vnode was found to be possibly freeable and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * freeable, doing some cleanups in the process.  Returns 1 if the vnode
 * can be freed, 0 otherwise.
 *
 * Note that v_holdcnt may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers, but the
 * 'real' test when all is said and done is that v_holdcnt must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_holdcnt != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories) just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list))
			return(0);
	}
	return(1);
}
static int
vtrytomakefreeable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VFREE)
		return (0);
	if (vp->v_usecount != 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_holdcnt && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, NULL, 0, 0);
#if 0	/* debug aid: report whether the buffer flush freed the hold */
		printf((vp->v_holdcnt ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}
	return(vp->v_usecount == 1 && vp->v_holdcnt == 0);
}
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	lwkt_tokref ilock;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * 2 / usevnodes;
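	/*
	 * Worked example (illustrative numbers): with vmstats.v_page_count
	 * == 1000000 and desiredvnodes == 100000, trigger becomes
	 * 1000000 * 2 / 100000 == 20, so vmightfree() below passes over
	 * any vnode caching 20 or more resident pages.
	 */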
	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		/*
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    !vmightfree(vp, trigger)	/* critical path opt */
		) {
			TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* XXX */
		    vp->v_type == VBAD ||	/* XXX */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakefreeable(vp, trigger)	/* critical path opt */
		) {
			if (vp->v_mount == mp) {
				TAILQ_REMOVE(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
				TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist,
						vp, v_nmntvnodes);
			}
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		vgone(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return (done);
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}
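/*
 * Usage sketch (an assumption for illustration, not taken from this
 * file): a vnode allocator that bumps into the kern.maxvnodes limit can
 * nudge the recycler and block until it has made a pass:
 *
 *	while (numvnodes - freevnodes > desiredvnodes)
 *		vnlru_proc_wait();
 */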
static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	lwkt_tokref ilock;
	int done;
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_loop();
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		done = 0;
		lwkt_gettoken(&ilock, &mountlist_token);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			lwkt_gettokref(&ilock);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		lwkt_reltoken(&ilock);
		if (done == 0) {
			++vnlru_nowhere;
			tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 10 == 0)
				printf("vnlru_proc: vnode recycler stopped working!\n");
		} else {
			vnlru_nowhere = 0;
		}
	}
}
static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&ilock);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&ilock);
}
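/*
 * Usage sketch: code that dissociates a vnode from its filesystem
 * passes a NULL mount point, which only performs the removal half:
 *
 *	insmntque(vp, NULL);	// take vp off its mount's vnode list
 */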
/*
 * Scan the vnodes under a mount point.  The first function is called
 * with just the mountlist token held (no vnode lock).  The second
 * function is called with the vnode VX locked.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	lwkt_tokref ilock;
	struct vnode *pvp;
	struct vnode *vp;
	int error;
	int r = 0;

	/*
	 * Scan the vnodes on the mount's vnode list.  Use a placemarker
	 * to hold our place in the list.
	 */
	pvp = allocvnode_placemarker();

	lwkt_gettoken(&ilock, &mntvnode_token);
	TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);

	while ((vp = TAILQ_NEXT(pvp, v_nmntvnodes)) != NULL) {
		/*
		 * Move the placemarker and skip other placemarkers we
		 * encounter.  Nothing can get in our way so the
		 * mount point on the vp must be valid.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
		TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, pvp, v_nmntvnodes);
		if (vp->v_flag & VPLACEMARKER)	/* another proc's placemarker */
			continue;
		if (vp->v_type == VNON)		/* visible but not ready */
			continue;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0)
				continue;
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			switch(flags) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE, curthread);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT,
						curthread);
				break;
			case VMSC_GETVX:
				error = vx_get(vp);
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				continue;
			if (TAILQ_PREV(pvp, vnodelst, v_nmntvnodes) != vp)
				goto skip;
			if (vp->v_type == VNON)
				goto skip;
			r = slowfunc(mp, vp, data);
skip:
			switch(flags) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			}
			if (r != 0)
				break;
		}
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, pvp, v_nmntvnodes);
	freevnode_placemarker(pvp);
	lwkt_reltoken(&ilock);
	return(r);
}
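/*
 * Usage sketch: vflush() below drives this scanner with VMSC_GETVX and
 * only a slow function.  A caller-side skeleton (my_slowfunc is a
 * hypothetical name):
 *
 *	static int
 *	my_slowfunc(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		// vp is VX locked here; return 0 to continue the scan,
 *		// a positive value to abort it.
 *		return (0);
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVX, NULL, my_slowfunc, NULL);
 */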
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_usecount exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
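/*
 * Usage sketch (illustrative): an unmount path holding one reference
 * on the root vnode might flush with
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * in which case a successful vflush() vrele()s the root vnode once.
 */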
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");

struct vflush_info {
	int flags;
	int busy;
	struct thread *td;
};

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);
int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
			return (error);
		vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) {
			if (vx_lock(rootvp) == 0) {
				vgone(rootvp);
				vx_unlock(rootvp);
				vflush_info.busy = 0;
			}
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr, info->td) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If our VX reference is the only one (v_usecount == 1), all we
	 * need to do is clear out the vnode data structures and we are
	 * done.
	 */
	if (vp->v_usecount == 1) {
		vgone(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode.  For block
	 * or character devices, revert to an anonymous device.  For
	 * all other files, just kill them.
	 */
	if (info->flags & FORCECLOSE) {
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgone(vp);
		} else {
			vclean(vp, 0, info->td);
			vp->v_ops = &spec_vnode_vops;
		}
		return(0);
	}

	if (busyprt)
		vprint("vflush: busy vnode", vp);
	++info->busy;
	return(0);
}