/*
 * Copyright (c) 2004,2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * External lock/ref-related vnode functions
 *
 * vs_state transition locking requirements:
 *
 *	INACTIVE -> CACHED|DYING	vx_lock(excl) + vfs_spin
 *	DYING    -> CACHED		vx_lock(excl)
 *	ACTIVE   -> INACTIVE		(none)       + v_spin + vfs_spin
 *	INACTIVE -> ACTIVE		vn_lock(any)  + v_spin + vfs_spin
 *	CACHED   -> ACTIVE		vn_lock(any)  + v_spin + vfs_spin
 *
 * NOTE: Switching to/from ACTIVE/INACTIVE requires v_spin and vfs_spin.
 *
 *	 Switching into ACTIVE also requires a vref and vnode lock, however
 *	 the vnode lock is allowed to be SHARED.
 *
 *	 Switching into a CACHED or DYING state requires an exclusive vnode
 *	 lock or vx_lock (which is almost the same thing).
 */
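/*
 * Illustrative sketch (added, not part of the original source): a
 * CACHED or INACTIVE -> ACTIVE transition as performed by vget() further
 * below.  The vnode lock may be SHARED; _vactivate() acquires vfs_spin
 * internally to do the list manipulation.
 *
 *	vn_lock(vp, LK_SHARED | LK_RETRY);	(vnode lock, any type)
 *	spin_lock(&vp->v_spin);
 *	_vactivate(vp);				(takes vfs_spin internally)
 *	spin_unlock(&vp->v_spin);
 */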
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm_object.h>

#include <sys/thread2.h>
static void vnode_terminate(struct vnode *vp);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
TAILQ_HEAD(freelst, vnode);
static struct freelst	vnode_active_list;
static struct freelst	vnode_inactive_list;
static struct vnode	vnode_active_rover;
static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
int activevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, activevnodes, CTLFLAG_RD,
	&activevnodes, 0, "Number of active nodes");
int cachedvnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, cachedvnodes, CTLFLAG_RD,
	&cachedvnodes, 0, "Number of total cached nodes");
int inactivevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, inactivevnodes, CTLFLAG_RD,
	&inactivevnodes, 0, "Number of inactive nodes");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	&wantfreevnodes, 0, "Desired number of free vnodes");
static int batchfreevnodes = 5;
SYSCTL_INT(_debug, OID_AUTO, batchfreevnodes, CTLFLAG_RW,
	&batchfreevnodes, 0, "Number of vnodes to free at once");

static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	&trackvnode, 0, "");
/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_inactive_list);
	TAILQ_INIT(&vnode_active_list);
	TAILQ_INSERT_TAIL(&vnode_active_list, &vnode_active_rover, v_list);
	spin_init(&vfs_spin);
	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
}
/*
 * Small wrappers to set and clear bits in v_flag atomically.
 */
static __inline void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}
/*
 * Place the vnode on the active list.
 *
 * Caller must hold vp->v_spin
 */
static __inline void
_vactivate(struct vnode *vp)
{
	if ((ulong)vp == trackvnode)
		kprintf("_vactivate %p %08x\n", vp, vp->v_flag);
	spin_lock(&vfs_spin);
	switch(vp->v_state) {
	case VS_ACTIVE:
		panic("_vactivate: already active");
		/* NOT REACHED */
		spin_unlock(&vfs_spin);
		return;
	case VS_INACTIVE:
		TAILQ_REMOVE(&vnode_inactive_list, vp, v_list);
		break;
	case VS_CACHED:
	case VS_DYING:
		break;
	}
	TAILQ_INSERT_TAIL(&vnode_active_list, vp, v_list);
	vp->v_state = VS_ACTIVE;
	spin_unlock(&vfs_spin);
}
/*
 * Put a vnode on the inactive list.
 *
 * Caller must hold v_spin
 */
static __inline void
_vinactive(struct vnode *vp)
{
	if ((ulong)vp == trackvnode) {
		kprintf("_vinactive %p %08x\n", vp, vp->v_flag);
	}
	spin_lock(&vfs_spin);

	/*
	 * Remove from active list if it is sitting on it
	 */
	switch(vp->v_state) {
	case VS_ACTIVE:
		TAILQ_REMOVE(&vnode_active_list, vp, v_list);
		break;
	case VS_INACTIVE:
		panic("_vinactive: already inactive");
		/* NOT REACHED */
		spin_unlock(&vfs_spin);
		return;
	case VS_CACHED:
	case VS_DYING:
		break;
	}

	/*
	 * Distinguish between basically dead vnodes, vnodes with cached
	 * data, and vnodes without cached data.  A rover will shift the
	 * vnodes around as their cache status is lost.
	 */
	if (vp->v_flag & VRECLAIMED)
		TAILQ_INSERT_HEAD(&vnode_inactive_list, vp, v_list);
	else
		TAILQ_INSERT_TAIL(&vnode_inactive_list, vp, v_list);
	vp->v_state = VS_INACTIVE;
	spin_unlock(&vfs_spin);
}
static __inline void
_vinactive_tail(struct vnode *vp)
{
	spin_lock(&vfs_spin);

	/*
	 * Remove from active list if it is sitting on it
	 */
	switch(vp->v_state) {
	case VS_ACTIVE:
		TAILQ_REMOVE(&vnode_active_list, vp, v_list);
		break;
	case VS_INACTIVE:
		panic("_vinactive_tail: already inactive");
		/* NOT REACHED */
		spin_unlock(&vfs_spin);
		return;
	case VS_CACHED:
	case VS_DYING:
		break;
	}
	TAILQ_INSERT_TAIL(&vnode_inactive_list, vp, v_list);
	vp->v_state = VS_INACTIVE;
	spin_unlock(&vfs_spin);
}
/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead), but might be called
 * with other states.
 */
void
vref(struct vnode *vp)
{
	KASSERT((VREFCNT(vp) > 0 && vp->v_state != VS_INACTIVE),
		("vref: bad refcnt %08x %d", vp->v_refcnt, vp->v_state));
	atomic_add_int(&vp->v_refcnt, 1);
}
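/*
 * Illustrative sketch (added, not part of the original source): the usual
 * pairing for a vnode already known to be active, e.g. one reached via an
 * open file.  vref() cannot reactivate a vnode; use vget() for that.
 *
 *	vref(vp);		add a real ref to the active vnode
 *	...use vp...
 *	vrele(vp);		drop it again, lockless in the common case
 */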
/*
 * Release a ref on an active or inactive vnode.
 *
 * Caller has no other requirements.
 *
 * If VREF_FINALIZE is set this will deactivate the vnode on the 1->0
 * transition, otherwise we leave the vnode in the active list and
 * do a lockless transition to 0, which is very important for the
 * critical path.
 *
 * (vrele() is not called when a vnode is being destroyed w/kfree)
 */
void
vrele(struct vnode *vp)
{
	for (;;) {
		int count = vp->v_refcnt;

		cpu_ccfence();
		KKASSERT((count & VREF_MASK) > 0);
		KKASSERT(vp->v_state == VS_ACTIVE ||
			 vp->v_state == VS_INACTIVE);

		/*
		 * 2+ case
		 */
		if ((count & VREF_MASK) > 1) {
			if (atomic_cmpset_int(&vp->v_refcnt, count, count - 1))
				break;
			continue;
		}

		/*
		 * 1->0 transition case must handle possible finalization.
		 * When finalizing we transition 1->0x40000000.  Note that
		 * cachedvnodes is only adjusted on transitions to ->0.
		 *
		 * WARNING! VREF_TERMINATE can be cleared at any point
		 *	    when the refcnt is non-zero (by vget()) and
		 *	    the vnode has not been reclaimed.  Thus
		 *	    transitions out of VREF_TERMINATE do not have
		 *	    to mess with cachedvnodes.
		 */
		if (count & VREF_FINALIZE) {
			vx_lock(vp);
			if (atomic_cmpset_int(&vp->v_refcnt,
					      count, VREF_TERMINATE)) {
				vnode_terminate(vp);
				break;
			}
			vx_unlock(vp);
		} else {
			if (atomic_cmpset_int(&vp->v_refcnt, count, 0)) {
				atomic_add_int(&cachedvnodes, 1);
				break;
			}
		}
		/* retry */
	}
}
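/*
 * Illustrative summary (added, not part of the original source): the
 * last-ref transitions handled by vrele() above, with VREF_TERMINATE
 * being the 0x40000000 bit mentioned in the comment.
 *
 *	refs 2+ -> refs-1		lockless, vnode stays put
 *	refs 1  -> 0			VREF_FINALIZE clear, ++cachedvnodes
 *	refs 1  -> VREF_TERMINATE	VREF_FINALIZE set, vnode_terminate()
 */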
/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent deactivation
 * or reclamation of the vnode, but will prevent the vnode from being
 * destroyed (kfree()'d).
 *
 * WARNING!  vhold() must not acquire v_spin.  The spinlock may or may not
 *	     already be held by the caller.  vdrop() will clean up the
 *	     free list state.
 */
void
vhold(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 */
void
vdrop(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, -1);
}
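/*
 * Illustrative sketch (added, not part of the original source): an
 * auxiliary reference pins the vnode structure itself without blocking
 * deactivation or reclamation, e.g. while the vnode is wired into a
 * side structure.
 *
 *	vhold(vp);		vnode may still be deactivated/reclaimed,
 *	...			but cannot be kfree()'d
 *	vdrop(vp);		vnode may be destroyed again
 */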
/*
 * This function is called on the 1->0 transition (which is actually
 * 1->VREF_TERMINATE) when VREF_FINALIZE is set, forcing deactivation
 * of the vnode.
 *
 * Additional vrefs are allowed to race but will not result in a reentrant
 * call to vnode_terminate() due to refcnt being VREF_TERMINATE.  This
 * prevents additional 1->0 transitions.
 *
 * ONLY A VGET() CAN REACTIVATE THE VNODE.
 *
 * Caller must hold the VX lock.
 *
 * NOTE: v_mount may be NULL due to assignment to dead_vnode_vops
 *
 * NOTE: The vnode may be marked inactive with dirty buffers
 *	 or dirty pages in its cached VM object still present.
 *
 * NOTE: VS_FREE should not be set on entry (the vnode was expected to
 *	 previously be active).  We lose control of the vnode the instant
 *	 it is placed on the free list.
 *
 *	 The VX lock is required when transitioning to VS_CACHED but is
 *	 not sufficient for the vshouldfree() interlocked test or when
 *	 transitioning away from VS_CACHED.  v_spin is also required for
 *	 those cases.
 */
static void
vnode_terminate(struct vnode *vp)
{
	KKASSERT(vp->v_state == VS_ACTIVE);

	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		/* might deactivate page */
	}
	spin_lock(&vp->v_spin);
	_vinactive(vp);
	spin_unlock(&vp->v_spin);

	vx_unlock(vp);
}
/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (aka UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that lockmgr()
 * would do the right thing.
 */
int
vx_lock_nonblock(struct vnode *vp)
{
	if (lockcountnb(&vp->v_lock))
		return(EBUSY);
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}
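/*
 * Illustrative sketch (added, not part of the original source): a VX lock
 * is an exclusive lockmgr lock on v_lock taken by deactivation/reclamation
 * code that already holds some form of reference on the vnode.
 *
 *	vx_lock(vp);		exclusive; caller already has a ref
 *	...deactivate or reclaim...
 *	vx_unlock(vp);
 */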
/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode that has no
 * chance of being destroyed in a SMP race.  That means the caller will
 * usually either hold an auxiliary reference (such as the namecache)
 * or hold some other lock that ensures that the vnode cannot be destroyed.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
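/*
 * Illustrative sketch (added, not part of the original source): typical
 * use from a code path holding only an auxiliary reference (e.g. via the
 * namecache), where the vnode's activation state is unknown.
 *
 *	if (vget(vp, LK_SHARED) == 0) {
 *		...vnode is referenced, locked, and VS_ACTIVE...
 *		vput(vp);		unlock + vrele
 *	}
 */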
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
		atomic_add_int(&cachedvnodes, -1);

	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.  This will not
		 * normally trigger a termination.
		 */
		vrele(vp);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else if (vp->v_state == VS_ACTIVE) {
		/*
		 * A VS_ACTIVE vnode coupled with the fact that we have
		 * a vnode lock (even if shared) prevents v_state from
		 * changing.  Since the vnode is not in a VRECLAIMED state,
		 * we can safely clear VINACTIVE.
		 *
		 * NOTE! Multiple threads may clear VINACTIVE if this is
		 *	 shared lock.  This race is allowed.
		 */
		_vclrflags(vp, VINACTIVE);	/* SMP race ok */
		if (++vp->v_act > VACT_MAX)	/* SMP race ok */
			vp->v_act = VACT_MAX;
	} else {
		/*
		 * If the vnode is not VS_ACTIVE it must be reactivated
		 * in addition to clearing VINACTIVE.  An exclusive spin_lock
		 * is needed to manipulate the vnode's list.
		 *
		 * Because the lockmgr lock might be shared, we might race
		 * another reactivation, which we handle.  In this situation,
		 * however, the refcnt prevents other v_state races.
		 *
		 * As with above, clearing VINACTIVE is allowed to race other
		 * clearings of VINACTIVE.
		 *
		 * VREF_TERMINATE and VREF_FINALIZE can only be cleared when
		 * the refcnt is non-zero and the vnode has not been
		 * reclaimed.  This also means that the transitions do
		 * not affect cachedvnodes.
		 */
		_vclrflags(vp, VINACTIVE);
		if (++vp->v_act > VACT_MAX)	/* SMP race ok */
			vp->v_act = VACT_MAX;
		spin_lock(&vp->v_spin);

		switch(vp->v_state) {
		case VS_INACTIVE:
			_vactivate(vp);
			atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE |
							VREF_FINALIZE);
			spin_unlock(&vp->v_spin);
			break;
		case VS_CACHED:
			_vactivate(vp);
			atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE |
							VREF_FINALIZE);
			spin_unlock(&vp->v_spin);
			break;
		case VS_ACTIVE:
			atomic_clear_int(&vp->v_refcnt, VREF_FINALIZE);
			spin_unlock(&vp->v_spin);
			break;
		case VS_DYING:
			spin_unlock(&vp->v_spin);
			panic("Impossible VS_DYING state");
			break;
		}
	}
	return(error);
}
void
debug_vput(struct vnode *vp, const char *filename, int line)
{
	kprintf("vput(%p) %s:%d\n", vp, filename, line);
	vn_unlock(vp);
	vrele(vp);
}

/*
 * Unlock and release a vnode previously acquired with vget().
 */
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}
/*
 * Acquire the vnode lock unguarded.
 *
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
	if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
		atomic_add_int(&cachedvnodes, -1);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	if (lockcountnb(&vp->v_lock))
		return(EBUSY);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error == 0) {
		if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
			atomic_add_int(&cachedvnodes, -1);
	}
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.  vrele() will handle
 * any needed state transitions.
 *
 * However, filesystems use this function to get rid of unwanted new vnodes
 * so try to get the vnode on the correct queue in that case.
 */
void
vx_put(struct vnode *vp)
{
	if (vp->v_type == VNON || vp->v_type == VBAD)
		atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
	lockmgr(&vp->v_lock, LK_RELEASE);
	vrele(vp);
}
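/*
 * Illustrative sketch (added, not part of the original source): a
 * filesystem discarding a vnode it just obtained but no longer wants.
 * Because v_type is still VNON, vx_put() flags the vnode for finalization
 * before unlocking and releasing it.
 *
 *	vp = allocvnode(...);		VX locked, vrefd, v_type == VNON
 *	...setup fails...
 *	vx_put(vp);			sets VREF_FINALIZE, unlock + vrele
 */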
/*
 * Try to reuse a vnode from the free list.  This function is somewhat
 * advisory in that NULL can be returned as a normal case, even if free
 * vnodes are present.
 *
 * The scan is limited because it can result in excessive CPU use during
 * periods of extreme vnode use.
 *
 * NOTE: The returned vnode is not completely initialized.
 */
static struct vnode *
cleanfreevnode(int maxcount)
{
	struct vnode *vp;
	int count;

	/*
	 * Try to deactivate some vnodes cached on the active list.
	 */
	if (cachedvnodes < inactivevnodes)
		goto skip;

	for (count = 0; count < maxcount * 2; count++) {
		spin_lock(&vfs_spin);

		vp = TAILQ_NEXT(&vnode_active_rover, v_list);
		TAILQ_REMOVE(&vnode_active_list, &vnode_active_rover, v_list);
		if (vp == NULL) {
			TAILQ_INSERT_HEAD(&vnode_active_list,
					  &vnode_active_rover, v_list);
		} else {
			TAILQ_INSERT_AFTER(&vnode_active_list, vp,
					   &vnode_active_rover, v_list);
		}
		if (vp == NULL || (vp->v_refcnt & VREF_MASK) != 0) {
			spin_unlock(&vfs_spin);
			continue;
		}

		/*
		 * Try to deactivate the vnode.
		 */
		if ((atomic_fetchadd_int(&vp->v_refcnt, 1) & VREF_MASK) == 0)
			atomic_add_int(&cachedvnodes, -1);
		atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
		spin_unlock(&vfs_spin);
		vrele(vp);
	}

skip:
	/*
	 * Loop trying to lock the first vnode on the free list.
	 */
	for (count = 0; count < maxcount; count++) {
		spin_lock(&vfs_spin);

		vp = TAILQ_FIRST(&vnode_inactive_list);
		if (vp == NULL) {
			spin_unlock(&vfs_spin);
			break;
		}

		/*
		 * non-blocking vx_get will also ref the vnode on success.
		 */
		if (vx_get_nonblock(vp)) {
			KKASSERT(vp->v_state == VS_INACTIVE);
			TAILQ_REMOVE(&vnode_inactive_list, vp, v_list);
			TAILQ_INSERT_TAIL(&vnode_inactive_list, vp, v_list);
			spin_unlock(&vfs_spin);
			continue;
		}

		/*
		 * Because we are holding vfs_spin the vnode should currently
		 * be inactive and VREF_TERMINATE should still be set.
		 *
		 * Once vfs_spin is released the vnode's state should remain
		 * unmodified due to both the lock and ref on it.
		 */
		KKASSERT(vp->v_state == VS_INACTIVE);
		spin_unlock(&vfs_spin);
		if ((ulong)vp == trackvnode)
			kprintf("cleanfreevnode %p %08x\n", vp, vp->v_flag);

		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children, a VM object association, or
		 * other hold users.
		 *
		 * Do not reclaim/reuse a vnode if someone else has a real
		 * ref on it.  This can occur if a filesystem temporarily
		 * releases the vnode lock during VOP_RECLAIM.
		 */
		if (vp->v_auxrefs ||
		    (vp->v_refcnt & ~VREF_FINALIZE) != VREF_TERMINATE + 1) {
failed:
			if (vp->v_state == VS_INACTIVE) {
				spin_lock(&vfs_spin);
				if (vp->v_state == VS_INACTIVE) {
					TAILQ_REMOVE(&vnode_inactive_list,
						     vp, v_list);
					TAILQ_INSERT_TAIL(&vnode_inactive_list,
							  vp, v_list);
				}
				spin_unlock(&vfs_spin);
			}
			vx_put(vp);
			continue;
		}

		/*
		 * VINACTIVE and VREF_TERMINATE are expected to both be set
		 * for vnodes pulled from the inactive list, and cannot be
		 * changed while we hold the vx lock.
		 *
		 * Try to reclaim the vnode.
		 */
		KKASSERT(vp->v_flag & VINACTIVE);
		KKASSERT(vp->v_refcnt & VREF_TERMINATE);

		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp))
				goto failed;
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * At this point if there are no other refs or auxrefs on
		 * the vnode with the inactive list locked, and we remove
		 * the vnode from the inactive list, it should not be
		 * possible for anyone else to access the vnode any more.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made and the
		 * vnode should have already been removed from its mountlist.
		 *
		 * Since we hold a VX lock on the vnode it cannot have been
		 * reactivated (moved out of the inactive list).
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		spin_lock(&vfs_spin);
		if (vp->v_auxrefs ||
		    (vp->v_refcnt & ~VREF_FINALIZE) != VREF_TERMINATE + 1) {
			spin_unlock(&vfs_spin);
			goto failed;
		}
		KKASSERT(vp->v_state == VS_INACTIVE);
		TAILQ_REMOVE(&vnode_inactive_list, vp, v_list);
		vp->v_state = VS_DYING;
		spin_unlock(&vfs_spin);

		/*
		 * Nothing should have been able to access this vp.  Only
		 * our ref should remain now.
		 */
		atomic_clear_int(&vp->v_refcnt, VREF_TERMINATE|VREF_FINALIZE);
		KASSERT(vp->v_refcnt == 1,
			("vp %p badrefs %08x", vp, vp->v_refcnt));

		/*
		 * Return a VX locked vnode suitable for reuse.
		 */
		return(vp);
	}
	return(NULL);
}
/*
 * Obtain a new vnode.  The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * We always allocate the vnode.  Attempting to recycle existing vnodes
 * here can lead to numerous deadlocks, particularly with softupdates.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Do not flag for recyclement unless there are enough free vnodes
	 * to recycle and the number of vnodes has exceeded our target.
	 *
	 * The vnlru tries to get to this before we are forced to do it
	 * synchronously in userexit, using 2/10.
	 */
	if (numvnodes >= desiredvnodes &&
	    cachedvnodes + inactivevnodes > desiredvnodes * 5 / 10 &&
	    cachedvnodes + inactivevnodes > wantfreevnodes) {
		struct thread *td = curthread;

		atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
	}

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	vp = kmalloc(sizeof(*vp), M_VNODE, M_ZERO | M_WAITOK);

	lwkt_token_init(&vp->v_token, "vnode");
	lockinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	spin_init(&vp->v_spin);

	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
	atomic_add_int(&numvnodes, 1);
	vp->v_refcnt = 1;
	vp->v_flag = VAGE0 | VAGE1;

	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	vp->v_filesize = NOOFFSET;
	vp->v_state = VS_CACHED;
	_vactivate(vp);

	return(vp);
}
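/*
 * Illustrative sketch (added, not part of the original source; 'ip' and
 * the VREG assignment are hypothetical): how a filesystem typically
 * consumes allocvnode().  The vnode comes back VX locked, vrefd and VNON;
 * it can still be discarded with vx_put() while it remains VNON.
 *
 *	vp = allocvnode(lktimeout, lkflags);
 *	vp->v_type = VREG;		now a regular-file vnode
 *	vp->v_data = ip;		filesystem-private inode data
 *	...hand the locked, referenced vnode back to the caller...
 */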
/*
 * Called after a process has allocated a vnode via allocvnode()
 * and we detected that too many vnodes were present.
 *
 * Try to reuse vnodes if we hit the max.  This situation only
 * occurs in certain large-memory (2G+) situations on 32 bit systems,
 * or if kern.maxvnodes is set to very low values.
 *
 * This function is called just prior to a return to userland if the
 * process at some point had to allocate a new vnode during the last
 * system call and the vnode count was found to be excessive.
 *
 * WARNING: Sometimes numvnodes can blow out due to children being
 *	    present under directory vnodes in the namecache.  For the
 *	    moment use an if() instead of a while() and note that if
 *	    we were to use a while() we would still have to break out
 *	    if freesomevnodes() returned 0.
 */
void
allocvnode_gc(void)
{
	if (numvnodes > desiredvnodes &&
	    cachedvnodes + inactivevnodes > wantfreevnodes) {
		freesomevnodes(batchfreevnodes);
	}
}
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		if ((vp = cleanfreevnode(n)) == NULL)
			break;
		--n;
		++count;
		kfree(vp, M_VNODE);
		atomic_add_int(&numvnodes, -1);
	}
	return(count);
}