kernel - lwkt_token revamp
[dragonfly.git] / sys / kern / vfs_lock.c
/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate,
		.lock = (sysref_terminate_func_t)vx_lock,
		.unlock = (sysref_terminate_func_t)vx_unlock
	}
};
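
/*
 * Overview (summary comment; the details live in the functions below):
 * a vnode managed by this sysref class moves roughly through
 *
 *	allocvnode() -> active (vref/vrele) -> last vrele() ->
 *	vnode_terminate() -> VCACHED or VFREE -> reactivated via vget(),
 *	reused via allocfreevnode(), or finally returned to the objcache.
 *
 * The .terminate/.lock/.unlock hooks above are how the sysref code calls
 * back into this file when those transitions occur.
 */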

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid1;
static struct vnode	vnode_free_mid2;
static struct vnode	vnode_free_rover;
static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
static enum { ROVER_MID1, ROVER_MID2 } rover_state = ROVER_MID2;
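
/*
 * Free list layout, as maintained by __vfree() and the rover below
 * (summary comment):
 *
 *	HEAD ... [no cached data] ... mid1 ... [swap-backed data only] ...
 *	mid2 ... [resident pages] ... TAIL
 *
 * Reclaimed vnodes are inserted at the head; the rover demotes vnodes
 * whose cache status has decayed so that reuse candidates accumulate
 * before mid1.
 */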

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
	&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	&wantfreevnodes, 0, "");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	&trackvnode, 0, "");
#endif

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid1, v_freelist);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid2, v_freelist);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_rover, v_freelist);
	spin_init(&vfs_spin);
	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
}

/*
 * Misc functions
 */
static __inline
void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline
void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}

/*
 * Inline helper functions.
 *
 * WARNING: vbusy() may only be called while the vnode lock or VX lock
 *	    is held.  The vnode spinlock need not be held.
 *
 * MPSAFE
 */
static __inline
void
__vbusy_interlocked(struct vnode *vp)
{
	KKASSERT(vp->v_flag & VFREE);
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	_vclrflags(vp, VFREE);
}

static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	__vbusy_interlocked(vp);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
		print_backtrace(-1);
	}
#endif
	spin_lock_wr(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);

	/*
	 * Distinguish between basically dead vnodes, vnodes with cached
	 * data, and vnodes without cached data.  A rover will shift the
	 * vnodes around as their cache status is lost.
	 */
	if (vp->v_flag & VRECLAIMED) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else if (vp->v_object && vp->v_object->resident_page_count) {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	} else if (vp->v_object && vp->v_object->swblock_count) {
		TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
	} else {
		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
	}
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Put a vnode on the tail of the free list.  The caller has cleared
 * VCACHED or owns the implied sysref related to having removed the vnode
 * from the freelist (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING! This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 *
 * MPSAFE
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on or removed from
 * the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * WARNING!  vhold() and vhold_interlocked() must not acquire v_spinlock.
 *	     The spinlock may or may not already be held by the caller.
 *	     vdrop() will clean up the free list state.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

void
vhold_interlocked(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.  We use v_spinlock to
 * interlock VCACHED -> !VCACHED transitions.
 *
 * MPSAFE
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	spin_lock_wr(&vp->v_spinlock);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
}
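
/*
 * Usage sketch (illustrative only, error handling elided): an auxiliary
 * reference is what lets a data structure safely examine a vnode without
 * reactivating it:
 *
 *	vhold(vp);		prevent destruction
 *	vx_lock(vp);		then VX lock it and test state
 *	...
 *	vx_unlock(vp);
 *	vdrop(vp);		may move a VCACHED vnode to VFREE
 */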

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will VX lock the vnode
 * and then give the vnode a negative ref count, indicating that it is
 * undergoing termination or is being set aside for the cache, and one
 * final sysref_put() is required to actually return it to the memory
 * subsystem.
 *
 * Additional inactive sysrefs may race us but that's ok.  Reactivations
 * cannot race us because the sysref code is interlocked with the VX lock
 * (which is held on call).
 *
 * MPSAFE
 */
void
vnode_terminate(struct vnode *vp)
{
	/*
	 * We own the VX lock, it should not be possible for someone else
	 * to have reactivated the vp.
	 */
	KKASSERT(sysref_isinactive(&vp->v_sysref));

	/*
	 * Deactivate the vnode by marking it VFREE or VCACHED.
	 * The vnode can be reactivated from either state until
	 * reclaimed.  These states inherit the 'last' sysref on the
	 * vnode.
	 *
	 * NOTE: There may be additional inactive references from
	 *	 other entities blocking on the VX lock while we hold it,
	 *	 but this does not prevent us from changing the vnode's
	 *	 state.
	 *
	 * NOTE: The vnode could already be marked inactive.  XXX
	 *	 how?
	 *
	 * NOTE: v_mount may be NULL due to assignment to
	 *	 dead_vnode_vops
	 *
	 * NOTE: The vnode may be marked inactive with dirty buffers
	 *	 or dirty pages in its cached VM object still present.
	 *
	 * NOTE: VCACHED should not be set on entry.  We lose control
	 *	 of the sysref the instant the vnode is placed on the
	 *	 free list or when VCACHED is set.
	 *
	 *	 The VX lock is required when transitioning to
	 *	 +VCACHED but is not sufficient for the vshouldfree()
	 *	 interlocked test or when transitioning to -VCACHED.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
	}
	spin_lock_wr(&vp->v_spinlock);
	KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
	if (vshouldfree(vp))
		__vfree(vp);
	else
		_vsetflags(vp, VCACHED); /* inactive but not yet free */
	spin_unlock_wr(&vp->v_spinlock);
	vx_unlock(vp);
}

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 *
 * MPSAFE
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token, 1);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

/*
 * MPSAFE
 */
void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
	ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 *
 * MPSAFE
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (e.g. UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that, lockmgr()
 * would do the right thing.
 */
static int
vx_lock_nonblock(struct vnode *vp)
{
	if (lockcountnb(&vp->v_lock))
		return(EBUSY);
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT | LK_NOSPINWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 *
		 * We are allowed to reactivate the vnode while we hold
		 * the VX lock, assuming it can be reactivated.
		 */
		spin_lock_wr(&vp->v_spinlock);
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			_vclrflags(vp, VCACHED);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else {
			if (sysref_isinactive(&vp->v_sysref)) {
				sysref_activate(&vp->v_sysref);
				kprintf("Warning vp %p reactivation race\n",
					vp);
			}
			spin_unlock_wr(&vp->v_spinlock);
		}
		_vclrflags(vp, VINACTIVE);
		error = 0;
	}
	return(error);
}
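
/*
 * Usage sketch (illustrative only): vget() is the combined ref+lock used
 * to bring a possibly VFREE/VCACHED vnode back into active use:
 *
 *	if (vget(vp, LK_SHARED) == 0) {
 *		...use the reactivated, locked vnode...
 *		vput(vp);	unlock and release in one call
 *	}
 *
 * On error (e.g. ENOENT for a reclaimed vnode) the caller is left with
 * neither the lock nor a reference.
 */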

/*
 * MPSAFE
 */
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}

/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 *
 * MPSAFE
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * MPSAFE
 */
int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 *
 * MPSAFE
 */
void
vx_put(struct vnode *vp)
{
	spin_lock_wr(&vp->v_spinlock);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
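
/*
 * Sketch (illustrative only): vx_get()/vx_put() bracket work on a vnode
 * that must not be reactivated, e.g. reclaim-side inspection:
 *
 *	vx_get(vp);
 *	...
 *	vx_put(vp);	may move a VCACHED vnode back to the free list
 */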

/*
 * The rover looks for vnodes past the midline with no cached data and
 * moves them to before the midline.  If we do not do this the midline
 * can wind up in a degenerate state.
 */
static
void
vnode_rover_locked(void)
{
	struct vnode *vp;

	/*
	 * Get the vnode after the rover.  The rover roves between mid1 and
	 * the end so the only special vnode it can encounter is mid2.
	 */
	vp = TAILQ_NEXT(&vnode_free_rover, v_freelist);
	if (vp == &vnode_free_mid2) {
		vp = TAILQ_NEXT(vp, v_freelist);
		rover_state = ROVER_MID2;
	}
	KKASSERT(vp != &vnode_free_mid1);

	/*
	 * Start over if we finished the scan.
	 */
	TAILQ_REMOVE(&vnode_free_list, &vnode_free_rover, v_freelist);
	if (vp == NULL) {
		TAILQ_INSERT_AFTER(&vnode_free_list, &vnode_free_mid1,
				   &vnode_free_rover, v_freelist);
		rover_state = ROVER_MID1;
		return;
	}
	TAILQ_INSERT_AFTER(&vnode_free_list, vp, &vnode_free_rover, v_freelist);

	/*
	 * Shift vp if appropriate.
	 */
	if (vp->v_object && vp->v_object->resident_page_count) {
		/*
		 * Promote vnode with resident pages to section 3.
		 * (This case shouldn't happen).
		 */
		if (rover_state == ROVER_MID1) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		}
	} else if (vp->v_object && vp->v_object->swblock_count) {
		/*
		 * Demote vnode with only swap pages to section 2
		 */
		if (rover_state == ROVER_MID2) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
		}
	} else {
		/*
		 * Demote vnode with no cached data to section 1
		 */
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
	}
}
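
/*
 * Note: allocfreevnode() below advances the rover two positions per
 * candidate examined, so the demotion scan keeps pace with free-list
 * consumption.
 */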

/*
 * Try to reuse a vnode from the free list.
 *
 * NOTE: The returned vnode is not completely initialized.
 *
 * WARNING: The freevnodes count can race, NULL can be returned even if
 *	    freevnodes != 0.
 *
 * MPSAFE
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * We use a bad hack in vx_lock_nonblock() which avoids
		 * the lock order reversal between vfs_spin and v_spinlock.
		 * This is very fragile code and I don't want to use
		 * vhold here.
		 */
		spin_lock_wr(&vfs_spin);
		vnode_rover_locked();
		vnode_rover_locked();
		vp = TAILQ_FIRST(&vnode_free_list);
		while (vp == &vnode_free_mid1 || vp == &vnode_free_mid2 ||
		       vp == &vnode_free_rover) {
			vp = TAILQ_NEXT(vp, v_freelist);
		}
		if (vp == NULL)
			break;
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			spin_unlock_wr(&vfs_spin);
			continue;
		}

		/*
		 * We inherit the sysref associated with the vnode on the
		 * free list.  Because VCACHED is clear the vnode will not
		 * be placed back on the free list.  We own the sysref
		 * free and clear and thus control the disposition of
		 * the vnode.
		 */
		__vbusy_interlocked(vp);
		spin_unlock_wr(&vfs_spin);
#ifdef TRACKVNODE
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif
		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children.
		 *
		 * We will make this test several times as auxrefs can
		 * get incremented on us without any spinlocks being held
		 * until we have removed all namecache and inode references
		 * to the vnode.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 *
		 * The free list association reinherits the sysref.
		 */
		if (vp->v_auxrefs) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * We inherit the reference that was previously associated
		 * with the vnode being on the free list.  VCACHED had better
		 * not be set because the reference and VX lock prevent
		 * the sysref from transitioning to an active state.
		 */
		KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
		KKASSERT(sysref_isinactive(&vp->v_sysref));

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * MPSAFE
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
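
/*
 * Usage sketch (illustrative only; the mount/vop association details are
 * filesystem specific and not shown in this file):
 *
 *	vp = allocvnode(0, 0);
 *	vp->v_type = VREG;
 *	vp->v_data = ip;	'ip' is a hypothetical fs-private inode
 *	...
 *	vx_unlock(vp);		vnode remains active and referenced
 */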

/*
 * MPSAFE
 */
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
		++count;	/* count the vnode actually freed */
	}
	return(count);
}