/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
        .name =         "vnode",
        .mtype =        M_VNODE,
        .proto =        SYSREF_PROTO_VNODE,
        .offset =       offsetof(struct vnode, v_sysref),
        .objsize =      sizeof(struct vnode),
        .mag_capacity = 256,
        .flags =        SRC_MANAGEDINIT,
        .ctor =         vnode_ctor,
        .dtor =         vnode_dtor,
        .ops = {
                .terminate = (sysref_terminate_func_t)vnode_terminate
        }
};

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode vnode_free_mid;
static struct spinlock vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);

int freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
        &freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
        &wantfreevnodes, 0, "");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
        &trackvnode, 0, "");
#endif

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
        TAILQ_INIT(&vnode_free_list);
        TAILQ_INSERT_HEAD(&vnode_free_list, &vnode_free_mid, v_freelist);
        spin_init(&vfs_spin);
}

/*
 * Misc functions
 */
static __inline
void
_vsetflags(struct vnode *vp, int flags)
{
        atomic_set_int(&vp->v_flag, flags);
}

static __inline
void
_vclrflags(struct vnode *vp, int flags)
{
        atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
        _vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
        _vclrflags(vp, flags);
}

/*
 * Inline helper functions.  __vbusy() and __vfree() must be called while
 * vp->v_spinlock is held.
 *
 * WARNING! This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
        if ((ulong)vp == trackvnode)
                kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
        spin_lock_wr(&vfs_spin);
        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
        freevnodes--;
        _vclrflags(vp, VFREE);
        spin_unlock_wr(&vfs_spin);
}

/*
 * WARNING! This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline
void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
        if ((ulong)vp == trackvnode) {
                kprintf("__vfree %p %08x\n", vp, vp->v_flag);
                print_backtrace();
        }
#endif
        spin_lock_wr(&vfs_spin);
        if (vp->v_flag & VRECLAIMED)
                TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
        else if (vp->v_flag & (VAGE0 | VAGE1))
                TAILQ_INSERT_BEFORE(&vnode_free_mid, vp, v_freelist);
        else
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        freevnodes++;
        _vsetflags(vp, VFREE);
        spin_unlock_wr(&vfs_spin);
}

/*
 * WARNING! This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline
void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
        if ((ulong)vp == trackvnode)
                kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
        spin_lock_wr(&vfs_spin);
        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        freevnodes++;
        _vsetflags(vp, VFREE);
        spin_unlock_wr(&vfs_spin);
}

/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING! This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
        return (vp->v_auxrefs == 0 &&
                (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 *
 * MPSAFE
 */
void
vref(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt > 0 &&
                 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
        sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
        sysref_put(&vp->v_sysref);
}
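
/*
 * Illustrative sketch (not part of the original file): the basic
 * vref()/vrele() cycle on a vnode already known to be active.  The
 * helper name example_ref_cycle() is hypothetical and the block is
 * compiled out.
 */
#if 0
static void
example_ref_cycle(struct vnode *vp)
{
        /* vp must be active: neither VFREE nor VINACTIVE may be set */
        vref(vp);
        /* ... use vp; the ref keeps the active sysref count positive ... */
        vrele(vp);      /* if this was the last active ref, vnode_terminate() runs */
}
#endif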

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt != 0);
        atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.
 *
 * MPSAFE
 */
void
vdrop(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
        spin_lock_wr(&vp->v_spinlock);
        atomic_subtract_int(&vp->v_auxrefs, 1);
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                _vclrflags(vp, VCACHED);
                __vfree(vp);
        }
        spin_unlock_wr(&vp->v_spinlock);
}

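/*
 * Illustrative sketch (not part of the original file): an auxiliary
 * reference pins the vnode structure itself without keeping the vnode
 * active, which is enough to VX lock it and inspect its state.
 * example_inspect() is a hypothetical caller that already holds some
 * ref on vp.  Compiled out.
 */
#if 0
static void
example_inspect(struct vnode *vp)
{
        vhold(vp);      /* struct cannot be destroyed; deactivation still allowed */
        vx_lock(vp);
        /* ... test vp->v_flag, vp->v_type, etc. ... */
        vx_unlock(vp);
        vdrop(vp);      /* may move a VCACHED vnode to VFREE */
}
#endif
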
/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for a vnode to be
 * reactivated while we are blocked on the lock.
 *
 * MPSAFE
 */
void
vnode_terminate(struct vnode *vp)
{
        vx_lock(vp);
        if (sysref_isinactive(&vp->v_sysref)) {
                /*
                 * Deactivate the vnode by marking it VFREE or VCACHED.
                 * The vnode can be reactivated from either state until
                 * reclaimed.  These states inherit the 'last' sysref on
                 * the vnode.
                 *
                 * NOTE: There may be additional inactive references from
                 * other entities blocking on the VX lock while we hold it,
                 * but this does not prevent us from changing the vnode's
                 * state.
                 *
                 * NOTE: The vnode could already be marked inactive.  XXX how?
                 *
                 * NOTE: v_mount may be NULL due to assignment to
                 * dead_vnode_vops.
                 *
                 * NOTE: The vnode may be marked inactive with dirty buffers
                 * or dirty pages in its cached VM object still present.
                 */
                if ((vp->v_flag & VINACTIVE) == 0) {
                        _vsetflags(vp, VINACTIVE);
                        if (vp->v_mount)
                                VOP_INACTIVE(vp);
                }
                spin_lock_wr(&vp->v_spinlock);
                KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
                if (vshouldfree(vp))
                        __vfree(vp);
                else
                        _vsetflags(vp, VCACHED); /* inactive but not yet free */
                spin_unlock_wr(&vp->v_spinlock);
                vx_unlock(vp);
        } else {
                /*
                 * Someone reactivated the vnode while we were blocked on
                 * the VX lock.  Release the VX lock and release the (now
                 * active) last reference which is no longer last.
                 */
                vx_unlock(vp);
                vrele(vp);
        }
}

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 *
 * MPSAFE
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
        struct vnode *vp = obj;

        lwkt_token_init(&vp->v_token);
        lockinit(&vp->v_lock, "vnode", 0, 0);
        ccms_dataspace_init(&vp->v_ccms);
        TAILQ_INIT(&vp->v_namecache);
        RB_INIT(&vp->v_rbclean_tree);
        RB_INIT(&vp->v_rbdirty_tree);
        RB_INIT(&vp->v_rbhash_tree);
        return(TRUE);
}

/*
 * MPSAFE
 */
void
vnode_dtor(void *obj, void *private)
{
        struct vnode *vp = obj;

        ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *                      VX LOCKING FUNCTIONS                    *
 ****************************************************************
 *
 * These functions lock vnodes for reclamation- and deactivation-related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 *
 * MPSAFE
 */
void
vx_lock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

static int
vx_lock_nonblock(struct vnode *vp)
{
        return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *                 VNODE ACQUISITION FUNCTIONS                  *
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired (an illustrative
 * sketch follows vput() below).
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
        int error;

        /*
         * A lock type must be passed
         */
        if ((flags & LK_TYPE_MASK) == 0) {
                panic("vget() called with no lock specified!");
                /* NOT REACHED */
        }

        /*
         * Reference the structure and then acquire the lock.  0->1
         * transitions and refs during termination are allowed here so
         * call sysref directly.
         *
         * NOTE: The requested lock might be a shared lock and does
         * not protect our access to the refcnt or other fields.
         */
        sysref_get(&vp->v_sysref);
        if ((error = vn_lock(vp, flags)) != 0) {
                /*
                 * The lock failed; undo and return an error.
                 */
                sysref_put(&vp->v_sysref);
        } else if (vp->v_flag & VRECLAIMED) {
                /*
                 * The node is being reclaimed and cannot be reactivated
                 * any more; undo and return ENOENT.
                 */
                vn_unlock(vp);
                vrele(vp);
                error = ENOENT;
        } else {
                /*
                 * If the vnode is marked VFREE or VCACHED it needs to be
                 * reactivated, otherwise it had better already be active.
                 * VINACTIVE must also be cleared.
                 *
                 * In the VFREE/VCACHED case we have to throw away the
                 * sysref that was earmarking those cases and preventing
                 * the vnode from being destroyed.  Our sysref is still held.
                 *
                 * The spinlock is our only real protection here.
                 */
                spin_lock_wr(&vp->v_spinlock);
                if (vp->v_flag & VFREE) {
                        __vbusy(vp);
                        sysref_activate(&vp->v_sysref);
                        spin_unlock_wr(&vp->v_spinlock);
                        sysref_put(&vp->v_sysref);
                } else if (vp->v_flag & VCACHED) {
                        _vclrflags(vp, VCACHED);
                        sysref_activate(&vp->v_sysref);
                        spin_unlock_wr(&vp->v_spinlock);
                        sysref_put(&vp->v_sysref);
                } else {
                        if (sysref_isinactive(&vp->v_sysref)) {
                                sysref_activate(&vp->v_sysref);
                                kprintf("Warning vp %p reactivation race\n",
                                        vp);
                        }
                        spin_unlock_wr(&vp->v_spinlock);
                }
                _vclrflags(vp, VINACTIVE);
                error = 0;
        }
        return(error);
}

/*
 * MPSAFE
 */
void
vput(struct vnode *vp)
{
        vn_unlock(vp);
        vrele(vp);
}

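/*
 * Illustrative sketch (not part of the original file): the mandatory
 * ref+lock pattern for a vnode whose activation state is unknown, e.g.
 * one resolved via the namecache.  example_access() is hypothetical and
 * compiled out.
 */
#if 0
static int
example_access(struct vnode *vp)
{
        int error;

        /* ref + lock + reactivate in one step */
        if ((error = vget(vp, LK_EXCLUSIVE)) != 0)
                return (error); /* ENOENT if reclaimed, EBUSY with LK_NOWAIT */
        /* ... vnode is active, refd, and exclusively locked ... */
        vput(vp);               /* unlock and drop the ref */
        return (0);
}
#endif
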
/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 *
 * MPSAFE
 */
void
vx_get(struct vnode *vp)
{
        sysref_get(&vp->v_sysref);
        lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * MPSAFE
 */
int
vx_get_nonblock(struct vnode *vp)
{
        int error;

        sysref_get(&vp->v_sysref);
        error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
        if (error)
                sysref_put(&vp->v_sysref);
        return(error);
}

/*
 * Release a VX lock that also holds a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 *
 * MPSAFE
 */
void
vx_put(struct vnode *vp)
{
        spin_lock_wr(&vp->v_spinlock);
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                _vclrflags(vp, VCACHED);
                __vfree(vp);
        }
        spin_unlock_wr(&vp->v_spinlock);
        lockmgr(&vp->v_lock, LK_RELEASE);
        sysref_put(&vp->v_sysref);
}

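/*
 * Illustrative sketch (not part of the original file): vx_get()/vx_put()
 * ref and lock a vnode without reactivating it, which is the pattern
 * reclamation-side code such as vnlru uses.  example_try_reclaim() is
 * hypothetical and compiled out.
 */
#if 0
static void
example_try_reclaim(struct vnode *vp)
{
        vx_get(vp);                     /* ref + exclusive lock, no reactivation */
        if ((vp->v_flag & VRECLAIMED) == 0)
                vgone_vxlocked(vp);     /* rip out the vnode while VX locked */
        vx_put(vp);                     /* unlock, drop ref, maybe VCACHED->VFREE */
}
#endif
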
/*
 * Try to reuse a vnode from the free list.  NOTE: The returned vnode
 * is not completely initialized.
 *
 * MPSAFE
 */
static
struct vnode *
allocfreevnode(void)
{
        struct vnode *vp;
        int count;

        for (count = 0; count < freevnodes; count++) {
                /*
                 * Note that regardless of how we block in this loop,
                 * we only get here if freevnodes != 0, so there
                 * had better be something on the list.
                 *
                 * Try to lock the first vnode on the free list.
                 * Cycle if we can't.
                 *
                 * XXX NOT MP SAFE
                 */
                spin_lock_wr(&vfs_spin);
                vp = TAILQ_FIRST(&vnode_free_list);
                if (vp == &vnode_free_mid)
                        vp = TAILQ_NEXT(vp, v_freelist);
                if (vx_lock_nonblock(vp)) {
                        KKASSERT(vp->v_flag & VFREE);
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_TAIL(&vnode_free_list,
                                          vp, v_freelist);
                        spin_unlock_wr(&vfs_spin);
                        continue;
                }
                spin_unlock_wr(&vfs_spin);
#ifdef TRACKVNODE
                if ((ulong)vp == trackvnode)
                        kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif

                /*
                 * With the vnode locked we can safely remove it
                 * from the free list.  We inherit the reference
                 * that was previously associated with the vnode
                 * being on the free list.
                 */
                KKASSERT((vp->v_flag & (VFREE|VINACTIVE)) ==
                          (VFREE|VINACTIVE));
                KKASSERT(sysref_isinactive(&vp->v_sysref));
                __vbusy(vp);

                /*
                 * Holding the VX lock on an inactive vnode prevents it
                 * from being reactivated or reused.  New namecache
                 * associations can only be made using active vnodes.
                 *
                 * Another thread may be blocked on our vnode lock while
                 * holding a namecache lock.  We can only reuse this vnode
                 * if we can clear all namecache associations without
                 * blocking.
                 */
                if ((vp->v_flag & VRECLAIMED) == 0) {
                        if (cache_inval_vp_nonblock(vp)) {
                                __vfreetail(vp);
                                vx_unlock(vp);
                                continue;
                        }
                        vgone_vxlocked(vp);
                        /* vnode is still VX locked */
                }

                /*
                 * We can reuse the vnode if no primary or auxiliary
                 * references remain other than ours, else put it
                 * back on the free list and keep looking.
                 *
                 * Either the free list inherits the last reference
                 * or we fall through and sysref_activate() the last
                 * reference.
                 *
                 * Since the vnode is in a VRECLAIMED state, no new
                 * namecache associations could have been made.
                 */
                KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
                if (vp->v_auxrefs ||
                    !sysref_islastdeactivation(&vp->v_sysref)) {
                        __vfreetail(vp);
                        vx_unlock(vp);
                        continue;
                }

                /*
                 * Return a VX locked vnode suitable for reuse.  The caller
                 * inherits the sysref.
                 */
                return(vp);
        }
        return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * MPSAFE
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
        struct vnode *vp;

        /*
         * Try to reuse vnodes if we hit the max.  This case only arises
         * in certain large-memory (2G+) configurations.  We cannot
         * attempt to directly reclaim vnodes due to nasty recursion
         * problems.
         */
        while (numvnodes - freevnodes > desiredvnodes)
                vnlru_proc_wait();

        /*
         * Try to build up as many vnodes as we can before reallocating
         * from the free list.  A vnode on the free list simply means
         * that it is inactive with no resident pages.  It may or may not
         * have been reclaimed and could have valuable information associated
         * with it that we shouldn't throw away unless we really need to.
         *
         * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
         * operation for HAMMER but this should benefit UFS as well.
         */
        if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
                vp = allocfreevnode();
        else
                vp = NULL;
        if (vp == NULL) {
                vp = sysref_alloc(&vnode_sysref_class);
                lockmgr(&vp->v_lock, LK_EXCLUSIVE);
                numvnodes++;
        }

        /*
         * Because we are using a managed sysref class, vnode fields are
         * only zeroed on initial allocation from the backing store, not
         * on reallocation.  Thus we have to clear these fields for both
         * reallocation and reuse.
         */
#ifdef INVARIANTS
        if (vp->v_data)
                panic("cleaned vnode isn't");
        if (bio_track_active(&vp->v_track_read) ||
            bio_track_active(&vp->v_track_write)) {
                panic("Clean vnode has pending I/O's");
        }
        if (vp->v_flag & VONWORKLST)
                panic("Clean vnode still pending on syncer worklist!");
        if (!RB_EMPTY(&vp->v_rbdirty_tree))
                panic("Clean vnode still has dirty buffers!");
        if (!RB_EMPTY(&vp->v_rbclean_tree))
                panic("Clean vnode still has clean buffers!");
        if (!RB_EMPTY(&vp->v_rbhash_tree))
                panic("Clean vnode still on hash tree!");
        KKASSERT(vp->v_mount == NULL);
#endif
        vp->v_flag = VAGE0 | VAGE1;
        vp->v_lastw = 0;
        vp->v_lasta = 0;
        vp->v_cstart = 0;
        vp->v_clen = 0;
        vp->v_socket = 0;
        vp->v_opencount = 0;
        vp->v_writecount = 0;   /* XXX */

        /*
         * lktimeout only applies when LK_TIMELOCK is used, and only
         * the pageout daemon uses it.  The timeout may not be zero
         * or the pageout daemon can deadlock in low-VM situations.
         */
        if (lktimeout == 0)
                lktimeout = hz / 10;
        lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
        KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
        /* exclusive lock still held */

        /*
         * Note: sysref needs to be activated to convert -0x40000000 to +1.
         * The -0x40000000 comes from the last ref on reuse, and from
         * sysref_init() on allocate.
         */
        sysref_activate(&vp->v_sysref);
        vp->v_filesize = NOOFFSET;
        vp->v_type = VNON;
        vp->v_tag = 0;
        vp->v_ops = NULL;
        vp->v_data = NULL;
        KKASSERT(vp->v_mount == NULL);

        return (vp);
}

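/*
 * Illustrative sketch (not part of the original file): a filesystem-side
 * consumer of allocvnode() receives a VX locked, refd, mostly-blank vnode
 * and must initialize v_type, v_data, and the vop vector before exposing
 * it.  example_make_vnode() and its fsprivate parameter are hypothetical;
 * compiled out.
 */
#if 0
static struct vnode *
example_make_vnode(void *fsprivate)
{
        struct vnode *vp;

        vp = allocvnode(0, 0);          /* returned VX locked & refd */
        vp->v_type = VREG;
        vp->v_data = fsprivate;
        /* ... set v_ops / associate with the mount before use ... */
        return (vp);                    /* still locked; caller unlocks once wired */
}
#endif
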
/*
 * MPSAFE
 */
int
freesomevnodes(int n)
{
        struct vnode *vp;
        int count = 0;

        while (n) {
                --n;
                if ((vp = allocfreevnode()) == NULL)
                        break;
                vx_put(vp);
                --numvnodes;
                ++count;        /* tally vnodes actually freed */
        }
        return(count);
}