/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.78 2006/10/26 02:27:19 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>

#define MAX_RECURSION_DEPTH	64
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding the
 * entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;			/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;		/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
struct nchstats nchstats[SMP_MAXCPU];

/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * effectiveness.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
  0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't ourselves.
 */
static __inline struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}
/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		cache_lock(ncp);
		cache_zap(ncp);
	} else {
		atomic_subtract_int(&ncp->nc_refs, 1);
	}
}
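
/*
 * Illustrative sketch (not part of the original source): the hold/drop
 * pair brackets any window in which a namecache pointer is kept outside
 * the normal locking protocol.  The helper below is hypothetical and
 * exists only to show the pattern.
 */
#if 0
static void
example_hold_drop(struct namecache *ncp)
{
	ncp = cache_hold(ncp);	/* ref prevents premature deletion */
	/* ... use ncp fields; zapping by others is still possible ... */
	cache_drop(ncp);	/* may zap if last ref and unresolved */
}
#endif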
/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}
/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		cache_drop(par);
	}
}
/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	/*
	 * Construct a fake FSMID based on the time of day and a 32 bit
	 * roller for uniqueness.  This is used to generate a useful
	 * FSMID for filesystems which do not support it.
	 */
	ncp->nc_fsmid = cache_getnewfsmid();
	TAILQ_INIT(&ncp->nc_list);
	cache_lock(ncp);
	return(ncp);
}
static void
cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}
/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
	return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
	_cache_drop(ncp);
}
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock owner.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
void
cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * WARNING!  If VRECLAIMED is set the vnode could
			 * already be in the middle of a recycle.  Callers
			 * should not assume that nc_vp is usable when
			 * not NULL.  cache_vref() or cache_vget() must be
			 * used instead.
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			printf("[diagnostic] cache_lock: blocked on %p", ncp);
			if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
				printf(" [MOUNTFROM %s]\n", ncp->nc_mount->mnt_stat.f_mntfromname);
			else
				printf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
		}
	}

	if (didwarn == 1) {
		printf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}
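
/*
 * Illustrative sketch (not part of the original source): exclusive
 * namespace locking around a namespace operation.  Per the WARNING
 * above, the lock alone does not guarantee nc_vp is usable.  The
 * helper below is hypothetical.
 */
#if 0
static void
example_lock_pattern(struct namecache *ncp)
{
	ncp = cache_hold(ncp);	/* must hold a ref before locking */
	cache_lock(ncp);	/* recursive; may tsleep on "clock" */
	/* ... create/delete/rename-style namespace work on ncp ... */
	cache_unlock(ncp);
	cache_drop(ncp);
}
#endif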
int
cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * WARNING!  If VRECLAIMED is set the vnode could
		 * already be in the middle of a recycle.  Callers
		 * should not assume that nc_vp is usable when
		 * not NULL.  cache_vref() or cache_vget() must be
		 * used instead.
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}
void
cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}
/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		cache_setunresolved(ncp);
	return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		cache_lock(ncp);
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
	cache_unlock(ncp);
	_cache_drop(ncp);
}
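
/*
 * Illustrative sketch (not part of the original source): the get/put
 * pair is the usual way nlookup-style code takes and releases a locked,
 * referenced ncp.  The helper below is hypothetical.
 */
#if 0
static int
example_get_put(struct namecache *ncp, struct ucred *cred)
{
	int error;

	ncp = cache_get(ncp);			/* ref + lock */
	error = cache_resolve(ncp, cred);	/* definitive state */
	cache_put(ncp);				/* unlock + deref */
	return(error);
}
#endif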
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}
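
/*
 * Illustrative sketch (not part of the original source): how a
 * filesystem resolver would typically finish, using cache_setvp()
 * for both positive and negative results.  The helper below is
 * hypothetical.
 */
#if 0
static int
example_nresolve_tail(struct namecache *ncp, struct vnode *vp)
{
	if (vp != NULL)
		cache_setvp(ncp, vp);	/* positive hit */
	else
		cache_setvp(ncp, NULL);	/* negative hit, nc_error = ENOENT */
	return(ncp->nc_error);
}
#endif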
void
cache_settimeout(struct namecache *ncp, int nticks)
{
	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
void
cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				NCF_FSMID);
	}
}
/*
 * Mark the namecache node as containing a mount point.
 *
 * XXX called with a ref'd but unlocked ncp.
 */
void
cache_setmountpt(struct namecache *ncp, struct mount *mp)
{
	ncp->nc_mount = mp;
	ncp->nc_flag |= NCF_MOUNTPT;
	ncp->nc_parent->nc_flag |= NCF_MOUNTEDHERE;
}

/*
 * Clean up a mount point in the namecache topology after an unmount.
 *
 * XXX we probably need to traverse the entire topology and clear
 * the nc_mount pointer.
 */
void
cache_clrmountpt(struct namecache *ncp)
{
	ncp->nc_flag &= ~NCF_MOUNTPT;
	ncp->nc_parent->nc_flag &= ~NCF_MOUNTEDHERE;
	ncp->nc_mount = NULL;
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 */
/*
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation.
 */
struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int cache_inval_internal(struct namecache *, int, struct cinvtrack *);
int
cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		printf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			cache_get(ncp2);
			cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
				&track);
			cache_put(ncp2);
		}
		cache_lock(ncp);
	}
	return(r);
}
static int
cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			cache_hold(ncp);
			++rcnt;
		}
		cache_hold(kid);
		cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				cache_lock(kid);
				rcnt += cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
				cache_unlock(kid);
			}
			cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * so check whether it was re-resolved behind our back.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			cache_hold(next);
		cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			cache_put(ncp);
			if (next)
				cache_drop(next);
			goto restart;
		}
		cache_inval(ncp, flags);
		cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
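
/*
 * Illustrative sketch (not part of the original source): how an
 * unlink-style operation would invalidate a name after removing the
 * underlying object, per the CINV_* description above.  The helper
 * below is hypothetical.
 */
#if 0
static void
example_unlink_inval(struct namecache *ncp)
{
	/* ncp is held and locked by the caller */
	cache_inval(ncp, CINV_DESTROY | CINV_CHILDREN);
	/* ncp is now unresolved and flagged NCF_DESTROYED */
}
#endif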
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target is assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
	struct namecache *scan;
	int didwarn = 0;

	cache_setunresolved(fncp);
	cache_setunresolved(tncp);
	while (cache_inval(tncp, CINV_CHILDREN) != 0) {
		if (didwarn++ % 10 == 0) {
			printf("Warning: cache_rename: race during "
				"rename %s->%s\n",
				fncp->nc_name, tncp->nc_name);
		}
		tsleep(tncp, 0, "mvrace", hz / 10);
		cache_setunresolved(tncp);
	}
	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
		cache_hold(scan);
		cache_unlink_parent(scan);
		cache_link_parent(scan, tncp);
		if (scan->nc_flag & NCF_HASHED)
			cache_rehash(scan);
		cache_drop(scan);
	}
}
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked vnode
 * (depending on the passed lk_type) will be returned in *vpp with an error
 * of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			printf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
			cache_lock(ncp);
			cache_setunresolved(ncp);
			cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
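
/*
 * Illustrative sketch (not part of the original source): a typical
 * consumer resolving an ncp to a locked vnode and treating ENOENT as a
 * negative hit.  The helper below is hypothetical.
 */
#if 0
static int
example_vget(struct namecache *ncp, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
	if (error == 0) {
		/* ... operate on vp ... */
		vput(vp);	/* release the lock + ref from vget */
	} else if (error == ENOENT) {
		/* negative cache hit, no vnode exists */
	}
	return(error);
}
#endif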
int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if (vp->v_flag & VRECLAIMED) {
			printf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
			cache_lock(ncp);
			cache_setunresolved(ncp);
			cache_unlock(ncp);
			goto again;
		}
		vref(vp);
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct namecache *ncp)
{
	struct vnode *vp;
	struct namecache *scan;

	/*
	 * Warning: even if we get a non-NULL vp it could still be in the
	 * middle of a recyclement.  Don't do anything fancy, just set
	 * NCF_FSMID.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
			for (scan = ncp; scan; scan = scan->nc_parent) {
				if (scan->nc_flag & NCF_FSMID)
					break;
				scan->nc_flag |= NCF_FSMID;
			}
		}
	} else {
		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
			ncp->nc_flag |= NCF_FSMID;
			ncp = ncp->nc_parent;
		}
	}
}
void
cache_update_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *scan;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		for (scan = ncp; scan; scan = scan->nc_parent) {
			if (scan->nc_flag & NCF_FSMID)
				break;
			scan->nc_flag |= NCF_FSMID;
		}
	}
}
/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
	struct namecache *ncp;
	int changed = 0;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			changed = 1;
		}
	}
	if (*fsmid == 0)
		++*fsmid;
	if (changed)
		++*fsmid;
	return(changed);
}
/*
 * Obtain the FSMID for a vnode for filesystems which do not support
 * it.
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;

	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			++ncp->nc_fsmid;
		}
		return(ncp->nc_fsmid);
	}
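
/*
 * Illustrative sketch (not part of the original source): how a
 * filesystem getattr might fold the namecache FSMID state into the
 * attributes it reports, per the comment above cache_check_fsmid_vp().
 * The helper and the use of vattr's fsmid field are assumptions for
 * illustration only.
 */
#if 0
static void
example_getattr_fsmid(struct vnode *vp, struct vattr *vap)
{
	if (cache_check_fsmid_vp(vp, &vap->va_fsmid)) {
		/* hierarchy changed since the last observation */
	}
}
#endif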
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
				  struct vnode *dvp);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);
struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
	struct namecache *ncp;
	struct vnode *saved_dvp;
	struct vnode *pvp;
	int error;

	saved_dvp = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be used.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		ncp = TAILQ_FIRST(&dvp->v_namecache);
		printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			ncp = cache_get(dvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_put(ncp);
			if (ncvp_debug) {
				printf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					printf(" failed\n");
				ncp = NULL;
				break;
			}
			if (ncvp_debug)
				printf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				printf("lookupdotdot(longpath) failed %d "
				       "dvp %p\n", error, dvp);
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			printf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}

		/*
		 * Reuse makeit as a recursion depth counter.
		 */
		ncp = cache_fromdvp(pvp, cred, makeit + 1);
		vrele(pvp);
		if (ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(ncp, cred, dvp);
		cache_drop(ncp);
		if (error) {
			printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, ncp->nc_name, dvp);
			ncp = NULL;
			break;
		}
		if (ncvp_debug) {
			printf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, ncp->nc_name);
		}
	}
	if (ncp)
		cache_hold(ncp);
	if (saved_dvp)
		vrele(saved_dvp);
	return (ncp);
}
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct namecache *ncp;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	for (;;) {
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			vrele(dvp);
			return (error);
		}
		if ((ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			cache_hold(ncp);
			vrele(pvp);
			break;
		}
		if (pvp->v_flag & VROOT) {
			ncp = cache_get(pvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_unlock(ncp);
			vrele(pvp);
			if (error) {
				cache_drop(ncp);
				vrele(dvp);
				return (error);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (last_fromdvp_report != time_second) {
		last_fromdvp_report = time_second;
		printf("Warning: extremely inefficient path resolution on %s\n",
			ncp->nc_name);
	}
	error = cache_inefficient_scan(ncp, cred, dvp);

	/*
	 * Hopefully dvp now has a namecache record associated with it.
	 * Leave it referenced to prevent the kernel from recycling the
	 * vnode.  Otherwise extremely long directory paths could result
	 * in endless recycling.
	 */
	if (*saved_dvp)
		vrele(*saved_dvp);
	*saved_dvp = dvp;
	cache_drop(ncp);
	return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged opportunistically,
 * or (2) Make the NFS server backtrack and scan to recover a connected
 * namecache topology in order to then be able to issue new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * with disconnected records.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
		       struct vnode *dvp)
{
	struct nlcomponent nlc;
	struct namecache *rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vref(ncp, cred, &pvp)) != 0)
		return (error);
	if (ncvp_debug)
		printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		printf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					printf("cache_inefficient_scan: "
					       "MATCHED inode %ld path %s/%*.*s\n",
					       vat.va_fileid, ncp->nc_name,
					       den->d_namlen, den->d_namlen,
					       den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(ncp, &nlc);
				KKASSERT(rncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	if (rncp) {
		vrele(pvp);
		if (rncp->nc_flag & NCF_UNRESOLVED) {
			cache_setvp(rncp, dvp);
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s = %p\n",
					ncp->nc_name, rncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					ncp->nc_name, rncp->nc_name, dvp,
					rncp->nc_vp);
			}
		}
		if (rncp->nc_vp == NULL)
			error = rncp->nc_error;
		cache_put(rncp);
	} else {
		printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, ncp->nc_name);
		vrele(pvp);
		error = ENOENT;
	}
	kfree(rbuf, M_TEMP);
	return (error);
}
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			break;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			break;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			kfree(ncp->nc_name, M_VFSCACHE);
		kfree(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		cache_lock(ncp);
	}
	cache_unlock(ncp);
	atomic_subtract_int(&ncp->nc_refs, 1);
}
static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	u_int32_t hash;
	globaldata_t gd = mycpu;

	numcalls++;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Zap entries that have timed out.
		 */
		if (ncp->nc_timeout &&
		    (int)(ncp->nc_timeout - ticks) < 0 &&
		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
		    ncp->nc_exlocks == 0
		) {
			cache_zap(cache_get(ncp));
			goto restart;
		}

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (cache_get_nonblock(ncp) == 0) {
				if (new_ncp)
					cache_free(new_ncp);
				goto found;
			}
			cache_get(ncp);
			cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * kmalloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  The caller is responsible for
	 * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
	 * be NULL as well.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
		ncp->nc_mount = par->nc_mount;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	return(ncp);
}
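
/*
 * Illustrative sketch (not part of the original source): driving a
 * one-component lookup through the new API and resolving top-down as
 * the comment above cache_nlookup() requires.  The helper below is
 * hypothetical.
 */
#if 0
static int
example_nlookup_one(struct namecache *par, char *name, int len,
		    struct ucred *cred)
{
	struct nlcomponent nlc;
	struct namecache *ncp;
	int error;

	nlc.nlc_nameptr = name;
	nlc.nlc_namelen = len;
	ncp = cache_nlookup(par, &nlc);	/* always locked + referenced */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(ncp, cred);
	else
		error = ncp->nc_error;
	cache_put(ncp);
	return(error);
}
#endif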
/*
 * Locate the mount point under a namecache entry.  We locate a special
 * child ncp with a 0-length name and retrieve the mount point from it.
 */
struct mount *
cache_findmount(struct namecache *par)
{
	struct namecache *ncp;
	u_int32_t hash;

	hash = FNV1_32_INIT;	/* special 0-length name */
	hash = fnv_32_buf(&par, sizeof(par), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		if (ncp->nc_nlen == 0 && (ncp->nc_flag & NCF_MOUNTPT))
			return(ncp->nc_mount);
	}
	return(NULL);
}
/*
 * Given a locked ncp, validate that the vnode, if present, is actually
 * usable.  If it is not usable set the ncp to an unresolved state.
 */
void
cache_validate(struct namecache *ncp)
{
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
	}
}
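
/*
 * Illustrative sketch (not part of the original source): the
 * lock-then-validate pattern recommended by the cache_lock() WARNING
 * earlier in this file.  The helper below is hypothetical.
 */
#if 0
static void
example_lock_validate(struct namecache *ncp)
{
	cache_hold(ncp);
	cache_lock(ncp);
	cache_validate(ncp);	/* nc_vp now usable, or ncp unresolved */
	/* ... */
	cache_put(ncp);		/* unlock + drop */
}
#endif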
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * is returned.
 */
int
cache_resolve(struct namecache *ncp, struct ucred *cred)
{
	struct namecache *par;
	int error;

restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp->nc_flag & NCF_MOUNTPT)
		return (cache_resolve_mp(ncp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		printf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroys the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does it should not have to go
	 * back too many nodes to resolve the ncp.
	 */
	while (ncp->nc_parent->nc_vp == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			printf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		cache_get(par);
		if (par->nc_flag & NCF_MOUNTPT) {
			cache_resolve_mp(par);
		} else if (par->nc_parent->nc_vp == NULL) {
			printf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
			cache_put(par);
			continue;
		} else if (par->nc_flag & NCF_UNRESOLVED) {
			par->nc_error = VOP_NRESOLVE(par, cred);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				printf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				cache_put(par);
				return(error);
			}
			printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
	ncp->nc_error = VOP_NRESOLVE(ncp, cred);
	/*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
	if (ncp->nc_error == EAGAIN) {
		printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to the mac-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call is that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct namecache *ncp)
{
	struct vnode *vp;
	struct mount *mp = ncp->nc_mount;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				cache_setvp(ncp, vp);
				vput(vp);
			} else {
				printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
				cache_setvp(ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}
void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}
/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialize per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 1 * hz;
}
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
struct namecache *
cache_allocroot(struct mount *mp, struct vnode *vp)
{
	struct namecache *ncp = cache_alloc(0);

	ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
	ncp->nc_mount = mp;
	cache_setvp(ncp, vp);
	cache_unlock(ncp);
	return(ncp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
{
	struct vnode *ovp;
	struct namecache *oncp;

	ovp = rootvnode;
	oncp = rootncp;
	rootvnode = nvp;
	rootncp = ncp;
	if (ovp)
		vrele(ovp);
	if (oncp)
		cache_drop(oncp);
}
/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.   The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				cache_lock(ncp);
				cache_zap(ncp);
			} else {
				cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
	static int fsmid_roller;
	int64_t fsmid;

	++fsmid_roller;
	fsmid = ((int64_t)time_second << 32) |
		(fsmid_roller & 0x7FFFFFFF);
	return (fsmid);
}
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}
char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	ncp = fdp->fd_ncdir;
	while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				*error = EBADF;		/* forced unmount? */
				return(NULL);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);
static int
cache_fullpath(struct proc *p, struct namecache *ncp, char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct namecache *fd_nrdir;

	numfullpathcalls++;
	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fd_nrdir = p->p_fd->fd_nrdir;
	slash_prefixed = 0;

	while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				kfree(buf, M_TEMP);
				return(EBADF);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numfullpathfail2++;
		kfree(buf, M_TEMP);
		return(ENOENT);
	}
	if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
		bp = buf + MAXPATHLEN - 1;
		*bp = '\0';
		slash_prefixed = 0;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	return(0);
}
int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;

	if (disablefullpath)
		return (ENODEV);
	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	return(cache_fullpath(p, ncp, retbuf, freebuf));
}
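
/*
 * Illustrative sketch (not part of the original source): a typical
 * vn_fullpath() caller.  The caller frees 'freebuf', not 'retbuf',
 * since retbuf points into the same allocation.  The helper below is
 * hypothetical.
 */
#if 0
static void
example_print_path(struct proc *p, struct vnode *vp)
{
	char *retbuf;
	char *freebuf;

	if (vn_fullpath(p, vp, &retbuf, &freebuf) == 0) {
		printf("path: %s\n", retbuf);
		kfree(freebuf, M_TEMP);
	}
}
#endif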