kernel - lwkt_token revamp
[dragonfly.git] / sys / kern / vfs_mount.c
/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, 1);
	lwkt_token_init(&mntvnode_token, 1);
	lwkt_token_init(&mntid_token, 1);
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

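/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * inode-to-vnode path calls getnewvnode(), fills in the remaining fields,
 * and keeps a ref with vx_unlock() (or drops everything with vx_put()).
 * The tag, the inode type and the function name are hypothetical.
 */
#if 0
static int
example_alloc_vnode(struct mount *mp, struct example_inode *ip,
		    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
	if (error)
		return (error);
	vp->v_data = ip;
	vp->v_type = VREG;	/* switching away from VNON makes it usable */
	vx_unlock(vp);		/* retain the ref for the caller */
	*vpp = vp;
	return (0);
}
#endif
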
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}

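/*
 * Illustrative sketch (not part of the original file): a caller interlocks
 * against unmount with vfs_busy() before touching a mount and releases the
 * shared lock with vfs_unbusy().  The function name is hypothetical.
 */
#if 0
static int
example_touch_mount(struct mount *mp)
{
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (ENOENT);	/* unmount in progress */
	/* ... mp cannot be unmounted while the shared lock is held ... */
	vfs_unbusy(mp);
	return (0);
}
#endif
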
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	lwkt_token_init(&mp->mnt_token, 1);

	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = DFLTPHYS;
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}

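/*
 * Worked example (illustrative, not part of the original file): with a
 * hypothetical vfc_typenum of 0x05 and mntid_base of 0x1234 the loop
 * proposes
 *
 *	mtype			= (0x05 & 0xFF) << 24	= 0x05000000
 *	(mntid_base & 0xFF00) << 8			= 0x00120000
 *	(mntid_base & 0xFF)				= 0x00000034
 *	minor			(OR of the above)	= 0x05120034
 *
 * i.e. tfsid.val[0] = makeudev(255, 0x05120034): the fs type occupies the
 * top byte and the 16-bit counter is split around the middle byte, which
 * is what keeps val[0] unique mod 2^16 until the low byte wraps.
 */
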
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}

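/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * derives its fsid from on-media data builds a template and lets
 * vfs_setfsid() bump val[1] past any collision.  example_volume_id is
 * hypothetical.
 */
#if 0
	fsid_t template;

	template.val[0] = example_volume_id;
	template.val[1] = mp->mnt_vfc->vfc_typenum;
	if (vfs_setfsid(mp, &template))
		kprintf("fsid collided, val[1] adjusted\n");
#endif
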
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock_wr(&vp->v_spinlock);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock_wr(&vp->v_spinlock);
			return(0);
		}
	}
	spin_unlock_wr(&vp->v_spinlock);
	return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mntvnode_token);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	crit_enter();
	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		cache_hysteresis();

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
	crit_exit();
}

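/*
 * Illustrative sketch (not part of the original file): a caller that finds
 * the vnode tables over budget can nudge the recycler and throttle itself
 * with vnlru_proc_wait(), which wakes vnlru and sleeps up to one second.
 * The function name is hypothetical.
 */
#if 0
static void
example_vnode_throttle(void)
{
	while (numvnodes - freevnodes > desiredvnodes * 9 / 10)
		vnlru_proc_wait();
}
#endif
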
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

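/*
 * Illustrative sketch (not part of the original file): a callback run via
 * mountlist_interlock() executes serialized against other mountlist
 * operations, e.g. to test-and-set mount state atomically with respect to
 * the mountlist token.  The callback name is hypothetical.
 */
#if 0
static int
example_interlock_cb(struct mount *mp)
{
	if (mp->mnt_kern_flag & MNTK_UNMOUNT)
		return (EBUSY);
	/* ... mutate mp while the mountlist token is held ... */
	return (0);
}

/* invoked as: error = mountlist_interlock(example_interlock_cb, mp); */
#endif
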
/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	thread_t td;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;
	td = curthread;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}
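
/*
 * Illustrative sketch (not part of the original file): a counting callback
 * driven by mountlist_scan().  Each mount is busied around the callback and
 * the non-negative return values are summed; a negative return would abort
 * the scan.  The callback name is hypothetical.
 */
#if 0
static int
example_count_vnodes(struct mount *mp, void *data)
{
	return (mp->mnt_nvnodelistsize);
}

/* invoked as: total = mountlist_scan(example_count_vnodes, NULL, MNTSCAN_FORWARD); */
#endif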

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_gettoken(&mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&mntvnode_token);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mntvnode_token);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mntvnode token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mntvnode
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = 1000000;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mntvnode_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize * 2;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0)
			panic("maxcount reached during vmntvnodescan");

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&mntvnode_token);
	return(r);
}
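
/*
 * Illustrative sketch (not part of the original file): a typical scan pairs
 * a non-blocking fastfunc filter with a locked slowfunc worker, in the style
 * of the syncer.  The dirty-buffer test and the function names are
 * hypothetical.
 */
#if 0
static int
example_scan_fast(struct mount *mp, struct vnode *vp, void *data)
{
	if (RB_EMPTY(&vp->v_rbdirty_tree))
		return (-1);	/* skip vnode, slowfunc not called */
	return (0);		/* continue to the slowfunc */
}

static int
example_scan_slow(struct mount *mp, struct vnode *vp, void *data)
{
	/* vp is held via vget(LK_EXCLUSIVE) here; may block */
	return (0);		/* non-zero terminates the scan */
}

/* invoked as:
 *	vmntvnodescan(mp, VMSC_GETVP|VMSC_NOWAIT,
 *		      example_scan_fast, example_scan_slow, NULL);
 */
#endif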

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
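
/*
 * Illustrative sketch (not part of the original file): an unmount path
 * typically attempts a clean flush and falls back to FORCECLOSE only when
 * MNT_FORCE was requested.  rootrefs of 1 assumes one reference is held on
 * the root vnode.
 */
#if 0
	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
	if (error)
		return (error);
#endif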

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (info->flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

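/*
 * Illustrative sketch (not part of the original file): a dependency
 * subsystem (e.g. softupdates) registers a bio_ops so bio_ops_sync(NULL)
 * reaches it from the general syncing code.  Only io_sync is shown; the
 * other bio_ops members are omitted.
 */
#if 0
static void
example_io_sync(struct mount *mp)
{
	/* flush private dependency state for mp, or all state if mp is NULL */
}

static struct bio_ops example_bio_ops = {
	.io_sync = example_io_sync
};

/* registered at module load with: add_bio_ops(&example_bio_ops); */
#endif
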
/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
