/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm_object.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
        .name =         "vnode",
        .mtype =        M_VNODE,
        .proto =        SYSREF_PROTO_VNODE,
        .offset =       offsetof(struct vnode, v_sysref),
        .objsize =      sizeof(struct vnode),
        .flags =        SRC_MANAGEDINIT,
        .ctor =         vnode_ctor,
        .dtor =         vnode_dtor,
        .ops = {
                .terminate = (sysref_terminate_func_t)vnode_terminate,
                .lock = (sysref_terminate_func_t)vx_lock,
                .unlock = (sysref_terminate_func_t)vx_unlock
        }
};

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode     vnode_free_mid1;
static struct vnode     vnode_free_mid2;
static struct vnode     vnode_free_rover;
static struct spinlock  vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
static enum { ROVER_MID1, ROVER_MID2 } rover_state = ROVER_MID2;
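
/*
 * Illustrative layout of the free list (not part of the original file;
 * inferred from the insertion logic in __vfree() and the rover code below):
 *
 *      HEAD -> [section 1: dead / no cached data]
 *           -> &vnode_free_mid1
 *           -> [section 2: only swap-backed pages]
 *           -> &vnode_free_mid2
 *           -> [section 3: resident VM pages]
 *           -> TAIL
 *
 * &vnode_free_rover roves between mid1 and the tail, re-sorting vnodes whose
 * cache status has changed (see vnode_free_rover_scan_locked()).
 */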

int freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
        &freevnodes, 0, "Number of free vnodes");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
        &wantfreevnodes, 0, "Desired number of free vnodes");
static int batchfreevnodes = 5;
SYSCTL_INT(_debug, OID_AUTO, batchfreevnodes, CTLFLAG_RW,
        &batchfreevnodes, 0, "Number of vnodes to free at once");
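
/*
 * Illustrative note (not part of the original file): the knobs above appear
 * under the debug sysctl tree, e.g. `sysctl debug.freevnodes` (read-only)
 * and `sysctl debug.wantfreevnodes=32` or `sysctl debug.batchfreevnodes=10`
 * (tunable at runtime).
 */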

static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
        &trackvnode, 0, "");

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
        TAILQ_INIT(&vnode_free_list);
        TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid1, v_freelist);
        TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid2, v_freelist);
        TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_rover, v_freelist);
        spin_init(&vfs_spin);
        kmalloc_raise_limit(M_VNODE, 0);        /* unlimited */
}

static __inline void
_vsetflags(struct vnode *vp, int flags)
{
        atomic_set_int(&vp->v_flag, flags);
}

static __inline void
_vclrflags(struct vnode *vp, int flags)
{
        atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
        _vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
        _vclrflags(vp, flags);
}

/*
 * Inline helper functions.
 *
 * WARNING: vbusy() may only be called while the vnode lock or VX lock
 *          is held.  The vnode spinlock need not be held.
 */
static __inline void
__vbusy_interlocked(struct vnode *vp)
{
        KKASSERT(vp->v_flag & VFREE);
        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
        freevnodes--;
        _vclrflags(vp, VFREE);
}

static __inline void
__vbusy(struct vnode *vp)
{
        if ((ulong)vp == trackvnode)
                kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
        spin_lock(&vfs_spin);
        __vbusy_interlocked(vp);
        spin_unlock(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 */
static __inline void
__vfree(struct vnode *vp)
{
        if ((ulong)vp == trackvnode) {
                kprintf("__vfree %p %08x\n", vp, vp->v_flag);
        }
        spin_lock(&vfs_spin);
        KKASSERT((vp->v_flag & VFREE) == 0);

        /*
         * Distinguish between basically dead vnodes, vnodes with cached
         * data, and vnodes without cached data.  A rover will shift the
         * vnodes around as their cache status is lost.
         */
        if (vp->v_flag & VRECLAIMED) {
                TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
        } else if (vp->v_object && vp->v_object->resident_page_count) {
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        } else if (vp->v_object && vp->v_object->swblock_count) {
                TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
        } else {
                TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
        }
        freevnodes++;
        _vsetflags(vp, VFREE);
        spin_unlock(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).  This version always
 * inserts the vnode at the tail of the list.
 */
static __inline void
__vfreetail(struct vnode *vp)
{
        if ((ulong)vp == trackvnode)
                kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
        spin_lock(&vfs_spin);
        KKASSERT((vp->v_flag & VFREE) == 0);
        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        freevnodes++;
        _vsetflags(vp, VFREE);
        spin_unlock(&vfs_spin);
}

/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING!  We used to indicate FALSE if the vnode had an object with
 *           resident pages but we no longer do that because it makes
 *           managing kern.maxvnodes difficult.  Instead we rely on vfree()
 *           to place the vnode properly on the list.
 *
 * WARNING!  This function is typically called with v_spin held.
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
        return (vp->v_auxrefs == 0);
        /*
         * Old test, no longer applied (see WARNING above):
         *      (vp->v_object == NULL || vp->v_object->resident_page_count == 0)
         */
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 */
void
vref(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt > 0 &&
                 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
        sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
        sysref_put(&vp->v_sysref);
}
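
/*
 * Illustrative sketch (not part of the original file): a caller that already
 * holds an active reference can take and release an extra one around a
 * blocking operation:
 *
 *      vref(vp);               / * vp known active, add a reference * /
 *      ...use vp...
 *      vrele(vp);              / * termination runs on the last active ref * /
 *
 * If the vnode's activation state is not known, vget()/vput() below must be
 * used instead.
 */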

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on or removed from
 * the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * WARNING!  vhold() and vhold_interlocked() must not acquire v_spin.
 *           The spinlock may or may not already be held by the caller.
 *           vdrop() will clean up the free list state.
 */
void
vhold(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt != 0);
        atomic_add_int(&vp->v_auxrefs, 1);
}

void
vhold_interlocked(struct vnode *vp)
{
        atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.  We use v_spin to
 * interlock VCACHED -> !VCACHED transitions.
 */
void
vdrop(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
        spin_lock(&vp->v_spin);
        atomic_subtract_int(&vp->v_auxrefs, 1);
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                _vclrflags(vp, VCACHED);
                __vfree(vp);
        }
        spin_unlock(&vp->v_spin);
}
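
/*
 * Illustrative sketch (not part of the original file): auxiliary references
 * are typically taken by data structures such as the namecache that point at
 * a vnode but do not care whether it is active:
 *
 *      vhold(vp);              / * vp cannot be destroyed while held * /
 *      ...vx_lock(vp), inspect state, vx_unlock(vp)...
 *      vdrop(vp);              / * may move a VCACHED vnode to the free list * /
 */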

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will VX lock the vnode
 * and then give the vnode a negative ref count, indicating that it is
 * undergoing termination or is being set aside for the cache, and one
 * final sysref_put() is required to actually return it to the memory
 * subsystem.
 *
 * Additional inactive sysrefs may race us but that's ok.  Reactivations
 * cannot race us because the sysref code is interlocked with the VX lock
 * (which is held on call).
 */
static void
vnode_terminate(struct vnode *vp)
{
        /*
         * We own the VX lock, it should not be possible for someone else
         * to have reactivated the vp.
         */
        KKASSERT(sysref_isinactive(&vp->v_sysref));

        /*
         * Deactivate the vnode by marking it VFREE or VCACHED.
         * The vnode can be reactivated from either state until
         * reclaimed.  These states inherit the 'last' sysref on the
         * vnode.
         *
         * NOTE: There may be additional inactive references from
         *       other entities blocking on the VX lock while we hold it,
         *       but this does not prevent us from changing the vnode's
         *       state.
         *
         * NOTE: The vnode could already be marked inactive.  XXX
         *
         * NOTE: v_mount may be NULL due to assignment to
         *       dead_vnode_vops.
         *
         * NOTE: The vnode may be marked inactive with dirty buffers
         *       or dirty pages in its cached VM object still present.
         *
         * NOTE: VCACHED should not be set on entry.  We lose control
         *       of the sysref the instant the vnode is placed on the
         *       free list or when VCACHED is set.
         *
         *       The VX lock is required when transitioning to
         *       +VCACHED but is not sufficient for the vshouldfree()
         *       interlocked test or when transitioning to -VCACHED.
         */
        if ((vp->v_flag & VINACTIVE) == 0) {
                _vsetflags(vp, VINACTIVE);
                if (vp->v_mount)
                        VOP_INACTIVE(vp);
        }
        spin_lock(&vp->v_spin);
        KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
        if (vshouldfree(vp))
                __vfree(vp);
        else
                _vsetflags(vp, VCACHED);        /* inactive but not yet free */
        spin_unlock(&vp->v_spin);
        vx_unlock(vp);
}
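
/*
 * Illustrative summary (not part of the original file) of the deactivation
 * states used above:
 *
 *      active --(last vrele)--> vnode_terminate()
 *          --> VINACTIVE + VCACHED   (auxiliary refs remain, not on free list)
 *          --> VINACTIVE + VFREE     (no aux refs, placed on the free list)
 *
 * Either state can be reactivated via vget() until the vnode is reclaimed.
 */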

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 */
static boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
        struct vnode *vp = obj;

        lwkt_token_init(&vp->v_token, "vnode");
        lockinit(&vp->v_lock, "vnode", 0, 0);
        TAILQ_INIT(&vp->v_namecache);
        RB_INIT(&vp->v_rbclean_tree);
        RB_INIT(&vp->v_rbdirty_tree);
        RB_INIT(&vp->v_rbhash_tree);
        spin_init(&vp->v_spin);
        return(TRUE);
}

static void
vnode_dtor(void *obj, void *private)
{
        struct vnode *vp __debugvar = obj;

        KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
}

/****************************************************************
 *                      VX LOCKING FUNCTIONS                   *
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */
void
vx_lock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (aka UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that lockmgr()
 * would do the right thing.
 */
static int
vx_lock_nonblock(struct vnode *vp)
{
        if (lockcountnb(&vp->v_lock))
                return(EBUSY);
        return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *                      VNODE ACQUISITION FUNCTIONS            *
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */

int
vget(struct vnode *vp, int flags)
{
        int error;

        /*
         * A lock type must be passed
         */
        if ((flags & LK_TYPE_MASK) == 0) {
                panic("vget() called with no lock specified!");
                /* NOT REACHED */
        }

        /*
         * Reference the structure and then acquire the lock.  0->1
         * transitions and refs during termination are allowed here so
         * call sysref directly.
         *
         * NOTE: The requested lock might be a shared lock and does
         *       not protect our access to the refcnt or other fields.
         */
        sysref_get(&vp->v_sysref);
        if ((error = vn_lock(vp, flags)) != 0) {
                /*
                 * The lock failed, undo and return an error.
                 */
                sysref_put(&vp->v_sysref);
        } else if (vp->v_flag & VRECLAIMED) {
                /*
                 * The node is being reclaimed and cannot be reactivated
                 * any more, undo and return ENOENT.
                 */
                vn_unlock(vp);
                vrele(vp);
                error = ENOENT;
        } else {
                /*
                 * If the vnode is marked VFREE or VCACHED it needs to be
                 * reactivated, otherwise it had better already be active.
                 * VINACTIVE must also be cleared.
                 *
                 * In the VFREE/VCACHED case we have to throw away the
                 * sysref that was earmarking those cases and preventing
                 * the vnode from being destroyed.  Our sysref is still held.
                 *
                 * We are allowed to reactivate the vnode while we hold
                 * the VX lock, assuming it can be reactivated.
                 */
                spin_lock(&vp->v_spin);
                if (vp->v_flag & VFREE) {
                        __vbusy(vp);
                        sysref_activate(&vp->v_sysref);
                        spin_unlock(&vp->v_spin);
                        sysref_put(&vp->v_sysref);
                } else if (vp->v_flag & VCACHED) {
                        _vclrflags(vp, VCACHED);
                        sysref_activate(&vp->v_sysref);
                        spin_unlock(&vp->v_spin);
                        sysref_put(&vp->v_sysref);
                } else {
                        if (sysref_isinactive(&vp->v_sysref)) {
                                sysref_activate(&vp->v_sysref);
                                kprintf("Warning vp %p reactivation race\n",
                                        vp);
                        }
                        spin_unlock(&vp->v_spin);
                }
                _vclrflags(vp, VINACTIVE);
        }
        return(error);
}

#ifdef DEBUG_VPUT

void
debug_vput(struct vnode *vp, const char *filename, int line)
{
        kprintf("vput(%p) %s:%d\n", vp, filename, line);
        vn_unlock(vp);
        vrele(vp);
}

#else

void
vput(struct vnode *vp)
{
        vn_unlock(vp);
        vrele(vp);
}

#endif
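
/*
 * Illustrative sketch (not part of the original file): typical use of
 * vget()/vput() when coming from an auxiliary reference such as a namecache
 * entry whose vnode may be VFREE, VCACHED, or active:
 *
 *      if ((error = vget(vp, LK_SHARED)) == 0) {
 *              ...vp is active, referenced, and locked...
 *              vput(vp);       / * unlock and release in one call * /
 *      }
 *
 * vget() returns ENOENT if the vnode was reclaimed while waiting for the
 * lock, and EBUSY if LK_NOWAIT was passed and the lock was unavailable.
 */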

/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
        sysref_get(&vp->v_sysref);
        lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
        int error;

        sysref_get(&vp->v_sysref);
        error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
        if (error)
                sysref_put(&vp->v_sysref);
        return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 */
void
vx_put(struct vnode *vp)
{
        spin_lock(&vp->v_spin);
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                _vclrflags(vp, VCACHED);
                __vfree(vp);
        }
        spin_unlock(&vp->v_spin);
        lockmgr(&vp->v_lock, LK_RELEASE);
        sysref_put(&vp->v_sysref);
}
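
/*
 * Illustrative sketch (not part of the original file): vx_get()/vx_put() are
 * used when the vnode must be examined or torn down without reactivating it,
 * e.g. from reclamation paths:
 *
 *      vx_get(vp);             / * ref + exclusive VX lock, no reactivation * /
 *      ...inspect or reclaim...
 *      vx_put(vp);             / * may transition a VCACHED vnode to VFREE * /
 */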

/*
 * The rover looks for vnodes past the midline with no cached data and
 * moves them to before the midline.  If we do not do this the midline
 * can wind up in a degenerate state.
 */
static void
vnode_free_rover_scan_locked(void)
{
        struct vnode *vp;

        /*
         * Get the vnode after the rover.  The rover roves between mid1 and
         * the end so the only special vnode it can encounter is mid2.
         */
        vp = TAILQ_NEXT(&vnode_free_rover, v_freelist);
        if (vp == &vnode_free_mid2) {
                vp = TAILQ_NEXT(vp, v_freelist);
                rover_state = ROVER_MID2;
        }
        KKASSERT(vp != &vnode_free_mid1);

        /*
         * Start over if we finished the scan.
         */
        TAILQ_REMOVE(&vnode_free_list, &vnode_free_rover, v_freelist);
        if (vp == NULL) {
                TAILQ_INSERT_AFTER(&vnode_free_list, &vnode_free_mid1,
                                   &vnode_free_rover, v_freelist);
                rover_state = ROVER_MID1;
                return;
        }
        TAILQ_INSERT_AFTER(&vnode_free_list, vp, &vnode_free_rover, v_freelist);

        /*
         * Shift vp if appropriate.
         */
        if (vp->v_object && vp->v_object->resident_page_count) {
                /*
                 * Promote vnode with resident pages to section 3.
                 */
                if (rover_state == ROVER_MID1) {
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
                }
        } else if (vp->v_object && vp->v_object->swblock_count) {
                /*
                 * Demote vnode with only swap pages to section 2.
                 */
                if (rover_state == ROVER_MID2) {
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
                }
        } else {
                /*
                 * Demote vnode with no cached data to section 1.
                 */
                TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
        }
}

void
vnode_free_rover_scan(int count)
{
        spin_lock(&vfs_spin);
        while (count > 0) {
                --count;
                vnode_free_rover_scan_locked();
        }
        spin_unlock(&vfs_spin);
}

/*
 * Try to reuse a vnode from the free list.  This function is somewhat
 * advisory in that NULL can be returned as a normal case, even if free
 * vnodes are present.
 *
 * The scan is limited because it can result in excessive CPU use during
 * periods of extreme vnode use.
 *
 * NOTE: The returned vnode is not completely initialized.
 */
static struct vnode *
allocfreevnode(int maxcount)
{
        struct vnode *vp;
        int count;

        for (count = 0; count < maxcount; count++) {
                /*
                 * Try to lock the first vnode on the free list.
                 *
                 * We use a bad hack in vx_lock_nonblock() which avoids
                 * the lock order reversal between vfs_spin and v_spin.
                 * This is very fragile code and I don't want to use
                 * vhold() here.
                 */
                spin_lock(&vfs_spin);
                vnode_free_rover_scan_locked();
                vnode_free_rover_scan_locked();
                vp = TAILQ_FIRST(&vnode_free_list);
                while (vp == &vnode_free_mid1 || vp == &vnode_free_mid2 ||
                       vp == &vnode_free_rover) {
                        vp = TAILQ_NEXT(vp, v_freelist);
                }
                if (vp == NULL) {
                        spin_unlock(&vfs_spin);
                        break;
                }
                if (vx_lock_nonblock(vp)) {
                        KKASSERT(vp->v_flag & VFREE);
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_TAIL(&vnode_free_list,
                                          vp, v_freelist);
                        spin_unlock(&vfs_spin);
                        continue;
                }

                /*
                 * We inherit the sysref associated with the vnode on the
                 * free list.  Because VCACHED is clear the vnode will not
                 * be placed back on the free list.  We own the sysref
                 * free and clear and thus control the disposition of
                 * the vnode.
                 */
                __vbusy_interlocked(vp);
                spin_unlock(&vfs_spin);
                if ((ulong)vp == trackvnode)
                        kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);

                /*
                 * Do not reclaim/reuse a vnode while auxiliary refs exist.
                 * This includes namecache refs due to a related ncp being
                 * locked or having children, a VM object association, or
                 * other refs.
                 *
                 * We will make this test several times as auxrefs can
                 * get incremented on us without any spinlocks being held
                 * until we have removed all namecache and inode references
                 * to the vnode.
                 *
                 * Because VCACHED is already in the correct state (cleared)
                 * we cannot race other vdrop()s occurring at the same time
                 * and can safely place vp on the free list.
                 *
                 * The free list association reinherits the sysref.
                 */
                if (vp->v_auxrefs) {
                        __vfreetail(vp);
                        vx_unlock(vp);
                        continue;
                }

                /*
                 * We inherit the reference that was previously associated
                 * with the vnode being on the free list.  VCACHED had better
                 * not be set because the reference and VX lock prevent
                 * the sysref from transitioning to an active state.
                 */
                KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
                KKASSERT(sysref_isinactive(&vp->v_sysref));

                /*
                 * Holding the VX lock on an inactive vnode prevents it
                 * from being reactivated or reused.  New namecache
                 * associations can only be made using active vnodes.
                 *
                 * Another thread may be blocked on our vnode lock while
                 * holding a namecache lock.  We can only reuse this vnode
                 * if we can clear all namecache associations without
                 * blocking.
                 *
                 * Because VCACHED is already in the correct state (cleared)
                 * we cannot race other vdrop()s occurring at the same time
                 * and can safely place vp on the free list.
                 */
                if ((vp->v_flag & VRECLAIMED) == 0) {
                        if (cache_inval_vp_nonblock(vp)) {
                                __vfreetail(vp);
                                vx_unlock(vp);
                                continue;
                        }
                        vgone_vxlocked(vp);
                        /* vnode is still VX locked */
                }

                /*
                 * We can reuse the vnode if no primary or auxiliary
                 * references remain other than ours, else put it
                 * back on the free list and keep looking.
                 *
                 * Either the free list inherits the last reference
                 * or we fall through and sysref_activate() the last
                 * reference.
                 *
                 * Since the vnode is in a VRECLAIMED state, no new
                 * namecache associations could have been made.
                 */
                KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
                if (vp->v_auxrefs ||
                    !sysref_islastdeactivation(&vp->v_sysref)) {
                        __vfreetail(vp);
                        vx_unlock(vp);
                        continue;
                }

                /*
                 * Return a VX locked vnode suitable for reuse.  The caller
                 * inherits the sysref.
                 */
                return(vp);
        }
        return(NULL);
}

/*
 * Obtain a new vnode.  The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * We always allocate the vnode.  Attempting to recycle existing vnodes
 * here can lead to numerous deadlocks, particularly with softupdates.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
        struct vnode *vp;

        /*
         * Do not flag for recyclement unless there are enough free vnodes
         * to recycle and the number of vnodes has exceeded our target.
         */
        if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes) {
                struct thread *td = curthread;
                if (td->td_lwp)
                        atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
        }
        vp = sysref_alloc(&vnode_sysref_class);
        KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
        lockmgr(&vp->v_lock, LK_EXCLUSIVE);
        atomic_add_int(&numvnodes, 1);

        /*
         * We are using a managed sysref class, vnode fields are only
         * zeroed on initial allocation from the backing store, not
         * on reallocation.  Thus we have to clear these fields for both
         * reallocation and reuse.
         */
        if (vp->v_data)
                panic("cleaned vnode isn't");
        if (bio_track_active(&vp->v_track_read) ||
            bio_track_active(&vp->v_track_write)) {
                panic("Clean vnode has pending I/O's");
        }
        if (vp->v_flag & VONWORKLST)
                panic("Clean vnode still pending on syncer worklist!");
        if (!RB_EMPTY(&vp->v_rbdirty_tree))
                panic("Clean vnode still has dirty buffers!");
        if (!RB_EMPTY(&vp->v_rbclean_tree))
                panic("Clean vnode still has clean buffers!");
        if (!RB_EMPTY(&vp->v_rbhash_tree))
                panic("Clean vnode still on hash tree!");
        KKASSERT(vp->v_mount == NULL);

        vp->v_flag = VAGE0 | VAGE1;
        vp->v_writecount = 0;   /* XXX */

        /*
         * lktimeout only applies when LK_TIMELOCK is used, and only
         * the pageout daemon uses it.  The timeout may not be zero
         * or the pageout daemon can deadlock in low-VM situations.
         */
        lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
        KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
        /* exclusive lock still held */

        /*
         * Note: sysref needs to be activated to convert -0x40000000 to +1.
         * The -0x40000000 comes from the last ref on reuse, and from
         * sysref_init() on allocate.
         */
        sysref_activate(&vp->v_sysref);
        vp->v_filesize = NOOFFSET;
        KKASSERT(vp->v_mount == NULL);

        return (vp);
}
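
/*
 * Illustrative sketch (not part of the original file): a filesystem creating
 * a new in-core vnode would typically allocate it here and then fill in the
 * type and private data before releasing the VX lock.  The name `ip' below
 * is a hypothetical filesystem inode pointer:
 *
 *      vp = allocvnode(0, 0);          / * returned VX locked and referenced * /
 *      vp->v_type = VREG;
 *      vp->v_data = ip;                / * hypothetical private data * /
 *      ...associate with the mount, then unlock/release as appropriate...
 */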

/*
 * Called after a process has allocated a vnode via allocvnode()
 * and we detected that too many vnodes were present.
 *
 * Try to reuse vnodes if we hit the max.  This situation only
 * occurs in certain large-memory (2G+) situations on 32 bit systems,
 * or if kern.maxvnodes is set to very low values.
 *
 * This function is called just prior to a return to userland if the
 * process at some point had to allocate a new vnode during the last
 * system call and the vnode count was found to be excessive.
 *
 * WARNING: Sometimes numvnodes can blow out due to children being
 *          present under directory vnodes in the namecache.  For the
 *          moment use an if() instead of a while() and note that if
 *          we were to use a while() we would still have to break out
 *          if freesomevnodes() returned 0.
 */
void
allocvnode_gc(void)
{
        if (numvnodes > desiredvnodes && freevnodes > wantfreevnodes) {
                freesomevnodes(batchfreevnodes);
        }
}

int
freesomevnodes(int n)
{
        struct vnode *vp;
        int count = 0;

        while (n) {
                if ((vp = allocfreevnode(n * 2)) == NULL)
                        break;
                --n;
                ++count;
                vx_put(vp);
                atomic_add_int(&numvnodes, -1);
        }
        return(count);
}