/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */
/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm_object.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate
	}
};
/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid;
static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
	&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	&wantfreevnodes, 0, "");
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	&trackvnode, 0, "");
/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_HEAD(&vnode_free_list, &vnode_free_mid, v_freelist);
	spin_init(&vfs_spin);
	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
}
static __inline void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}
/*
 * Inline helper functions.
 *
 * WARNING: vbusy() may only be called while the vnode lock or VX lock
 *	    is held.  The vnode spinlock need not be held.
 */
static __inline void
__vbusy_interlocked(struct vnode *vp)
{
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	_vclrflags(vp, VFREE);
}
static __inline void
__vbusy(struct vnode *vp)
{
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
	spin_lock_wr(&vfs_spin);
	__vbusy_interlocked(vp);
	spin_unlock_wr(&vfs_spin);
}
/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 */
static __inline void
__vfree(struct vnode *vp)
{
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
	}
	spin_lock_wr(&vfs_spin);
	if (vp->v_flag & VRECLAIMED)
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	else if (vp->v_flag & (VAGE0 | VAGE1))
		TAILQ_INSERT_BEFORE(&vnode_free_mid, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}
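/*
 * Free list layout after the insertions above (illustration only):
 *
 *	HEAD -> reclaimed vnodes ... aged (VAGE0/VAGE1) vnodes ->
 *	&vnode_free_mid -> other inactive vnodes -> TAIL
 *
 * allocfreevnode() scans from the head, so reclaimed and aged vnodes
 * are reused before recently freed ones.
 */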
/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 */
static __inline void
__vfreetail(struct vnode *vp)
{
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
	spin_lock_wr(&vfs_spin);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}
/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING! This function is typically called with v_spinlock held.
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}
/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}
/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}
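#if 0
/*
 * Illustrative sketch only (not compiled): typical vref()/vrele()
 * pairing on a vnode already known to be active, e.g. one the caller
 * previously obtained via vget().  The helper name is hypothetical.
 */
static void
example_ref_active(struct vnode *vp)
{
	vref(vp);	/* vp known active: add a plain ref */
	/* ... use vp ... */
	vrele(vp);	/* may invoke vnode_terminate() if last active ref */
}
#endif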
/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on or removed from
 * the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * WARNING!  vhold() and vhold_interlocked() must not acquire v_spinlock.
 *	     The spinlock may or may not already be held by the caller.
 *	     vdrop() will clean up the free list state.
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

void
vhold_interlocked(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}
/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.  We use v_spinlock to
 * interlock VCACHED -> !VCACHED transitions.
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	spin_lock_wr(&vp->v_spinlock);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
}
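#if 0
/*
 * Illustrative sketch only (not compiled): an auxiliary reference keeps
 * the vnode structure from being destroyed while we block, but does not
 * keep it active or off the free list.  The helper name is hypothetical.
 */
static void
example_hold_and_inspect(struct vnode *vp)
{
	vhold(vp);	/* structure cannot be destroyed out from under us */
	vx_lock(vp);	/* ... so it is safe to VX lock and test state */
	/* ... examine vp->v_flag; vp may be VFREE or reclaimed ... */
	vx_unlock(vp);
	vdrop(vp);	/* may complete a VCACHED->VFREE transition */
}
#endif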
/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for a vnode to be reactivated
 * while we were blocked on the lock.
 */
static void
vnode_terminate(struct vnode *vp)
{
	vx_lock(vp);
	if (sysref_isinactive(&vp->v_sysref)) {
		/*
		 * Deactivate the vnode by marking it VFREE or VCACHED.
		 * The vnode can be reactivated from either state until
		 * reclaimed.  These states inherit the 'last' sysref on
		 * the vnode.
		 *
		 * NOTE: There may be additional inactive references from
		 *	 other entities blocking on the VX lock while we
		 *	 hold it, but this does not prevent us from
		 *	 changing the vnode's state.
		 *
		 * NOTE: The vnode could already be marked inactive.  XXX
		 *
		 * NOTE: v_mount may be NULL due to assignment to
		 *	 dead_vnode_vops.
		 *
		 * NOTE: The vnode may be marked inactive with dirty buffers
		 *	 or dirty pages in its cached VM object still present.
		 *
		 * NOTE: VCACHED should not be set on entry.  We lose control
		 *	 of the sysref the instant the vnode is placed on the
		 *	 free list or when VCACHED is set.
		 *
		 *	 The VX lock is sufficient when transitioning
		 *	 to +VCACHED but not sufficient for the vshouldfree()
		 *	 interlocked test.
		 */
		if ((vp->v_flag & VINACTIVE) == 0) {
			_vsetflags(vp, VINACTIVE);
			if (vp->v_mount)
				VOP_INACTIVE(vp);
		}
		spin_lock_wr(&vp->v_spinlock);
		KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
		if (vshouldfree(vp))
			__vfree(vp);
		else
			_vsetflags(vp, VCACHED); /* inactive but not yet free */
		spin_unlock_wr(&vp->v_spinlock);
		vx_unlock(vp);
	} else {
		/*
		 * Someone reactivated the vnode while we were blocked on
		 * the VX lock.  Release the VX lock and release the (now
		 * active) last reference which is no longer last.
		 */
		vx_unlock(vp);
		vrele(vp);
	}
}
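/*
 * State summary for the deactivation path above (illustration only):
 *
 *	active --(last vrele)--> vnode_terminate()
 *	  -> VINACTIVE set, VOP_INACTIVE() dispatched if mounted
 *	  -> VFREE   if vshouldfree() (no auxrefs, no resident pages)
 *	  -> VCACHED otherwise (vdrop()/vx_put() may free it later)
 *
 * Either state can be reactivated by vget() until the vnode is
 * reclaimed.
 */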
/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 */
static boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return (TRUE);
}
static void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	ccms_dataspace_destroy(&vp->v_ccms);
}
/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}
/*
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (aka UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that lockmgr()
 * would do the right thing.
 */
static int
vx_lock_nonblock(struct vnode *vp)
{
	if (lockcountnb(&vp->v_lock))
		return (EBUSY);
	return (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT | LK_NOSPINWAIT));
}
void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}
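#if 0
/*
 * Illustrative sketch only (not compiled): vx_lock_nonblock() fails even
 * on a recursive acquisition, so VOP_RECLAIM code that winds its way
 * back into allocfreevnode() cannot be handed the very vnode it is in
 * the middle of destroying.  The helper name is hypothetical.
 */
static void
example_vx_nonblock(struct vnode *vp)
{
	if (vx_lock_nonblock(vp) == 0) {
		/* lock acquired and we did not already hold it */
		vx_unlock(vp);
	}
}
#endif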
/****************************************************************
 *		VNODE ACQUISITION FUNCTIONS			*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 *
		 * The spinlock is our only real protection here.
		 */
		spin_lock_wr(&vp->v_spinlock);
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			_vclrflags(vp, VCACHED);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else {
			if (sysref_isinactive(&vp->v_sysref)) {
				sysref_activate(&vp->v_sysref);
				kprintf("Warning vp %p reactivation race\n",
					vp);
			}
			spin_unlock_wr(&vp->v_spinlock);
		}
		_vclrflags(vp, VINACTIVE);
		error = 0;
	}
	return (error);
}
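#if 0
/*
 * Illustrative sketch only (not compiled): vget()/vput() is the pattern
 * for a vnode whose activation state is unknown, e.g. one found via the
 * namecache.  The helper name is hypothetical; the error handling shown
 * follows the contract documented above.
 */
static int
example_vget(struct vnode *vp)
{
	int error;

	error = vget(vp, LK_EXCLUSIVE);	/* ref + lock + reactivate */
	if (error == 0) {
		/* vp is active, referenced, and exclusively locked */
		vput(vp);		/* unlock + vrele */
	}
	return (error);			/* e.g. ENOENT if reclaimed */
}
#endif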
/*
 * Unlock and release a vnode.
 */
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}
/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}
int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return (error);
}
/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 */
void
vx_put(struct vnode *vp)
{
	spin_lock_wr(&vp->v_spinlock);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
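#if 0
/*
 * Illustrative sketch only (not compiled): vx_get()/vx_put() reference
 * and lock a vnode without reactivating it, which is what reclamation
 * and deactivation paths want.  The helper name is hypothetical.
 */
static void
example_vx_get(struct vnode *vp)
{
	vx_get(vp);	/* ref + exclusive lock, no reactivation */
	/* ... inspect or tear down state ... */
	vx_put(vp);	/* may move a VCACHED vnode to the free list */
}
#endif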
/*
 * Try to reuse a vnode from the free list.
 *
 * NOTE: The returned vnode is not completely initialized.
 *
 * WARNING: The freevnodes count can race, NULL can be returned even if
 *	    freevnodes != 0.
 */
static struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * We use a bad hack in vx_lock_nonblock() which avoids
		 * the lock order reversal between vfs_spin and v_spinlock.
		 * This is very fragile code and I don't want to use
		 * vhold here.
		 */
		spin_lock_wr(&vfs_spin);
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vp == &vnode_free_mid)
			vp = TAILQ_NEXT(vp, v_freelist);
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			spin_unlock_wr(&vfs_spin);
			continue;
		}
		/*
		 * We inherit the sysref associated with the vnode on the
		 * free list.  Because VCACHED is clear the vnode will not
		 * be placed back on the free list.  We own the sysref
		 * free and clear and thus control the disposition of
		 * the vnode.
		 */
		__vbusy_interlocked(vp);
		spin_unlock_wr(&vfs_spin);
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children.
		 *
		 * We will make this test several times as auxrefs can
		 * get incremented on us without any spinlocks being held
		 * until we have removed all namecache and inode references
		 * to the vnode.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 *
		 * The free list association reinherits the sysref.
		 */
		if (vp->v_auxrefs) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}
		/*
		 * We inherit the reference that was previously associated
		 * with the vnode being on the free list.  VCACHED had better
		 * not be set because the reference and VX lock prevent
		 * the sysref from transitioning to an active state.
		 */
		KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
		KKASSERT(sysref_isinactive(&vp->v_sysref));
		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}
		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return (vp);
	}
	return (NULL);
}
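/*
 * Summary (illustration only): a candidate is re-queued with
 * __vfreetail() and the scan continues when (1) auxiliary refs exist,
 * (2) the namecache cannot be invalidated without blocking, or (3) new
 * references appeared during reclamation.  The scan is bounded by the
 * freevnodes count, so NULL may be returned even when free vnodes
 * remain.
 */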
/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;
	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();
	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}
	/*
	 * We are using a managed sysref class, vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_writecount = 0;	/* XXX */
	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */
	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
void
freesomevnodes(int n)
{
	struct vnode *vp;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
	}
}