/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.91 2008/06/14 05:34:06 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>

#include <sys/sysref2.h>
#include <sys/mplock2.h>

#define MAX_RECURSION_DEPTH	64
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024
MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromdvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;		/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_rehash(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void cache_cleanneg(int count);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
struct nchstats nchstats[SMP_MAXCPU];

/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * effectiveness.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't take one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		_cache_lock(ncp);
		cache_zap(ncp);
	} else {
		atomic_subtract_int(&ncp->nc_refs, 1);
	}
}
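
/*
 * Example (sketch, not compiled): holding an entry across a blocking
 * operation.  The ref from cache_hold() only prevents premature deletion;
 * the entry can still be zapped to an unresolved state while we sleep, so
 * this hypothetical caller rechecks NCF_UNRESOLVED afterwards.
 */
#if 0
static void
example_hold_across_blocking(struct nchandle *nch)
{
	cache_hold(nch);			/* refs ncp and mount */
	/* ... potentially blocking work ... */
	if (nch->ncp->nc_flag & NCF_UNRESOLVED) {
		/* entry was unresolved/zapped while we were away */
	}
	cache_drop(nch);	/* last ref of an unresolved leaf zaps it */
}
#endif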
/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = _cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		_cache_drop(par);
	}
}
/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;
	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);		/* returned entry is locked */
	return(ncp);
}

static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
	return(nch);
}

/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	_cache_hold(target->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	nch->mount = mp;
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

void
cache_drop(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td = curthread;
	int didwarn = 0;

	KKASSERT(ncp->nc_refs != 0);

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * WARNING!  If VRECLAIMED is set the vnode could
			 * already be in the middle of a recycle.  Callers
			 * should not assume that nc_vp is usable when
			 * not NULL.  cache_vref() or cache_vget() must be
			 * called.
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			kprintf("[diagnostic] cache_lock: blocked on %p", ncp);
			kprintf(" \"%*.*s\"\n",
				ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		}
	}

	if (didwarn) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}
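
/*
 * Example (sketch, not compiled): a lock alone does not guarantee a usable
 * vnode if a recycle was already initiated.  Per the WARNING above, a
 * hypothetical caller needing a definitive state follows cache_lock() with
 * cache_resolve(), or uses cache_get()/cache_put() instead.
 */
#if 0
static int
example_lock_and_validate(struct nchandle *nch, struct ucred *cred)
{
	int error;

	cache_hold(nch);
	cache_lock(nch);		  /* nc_vp may still be mid-recycle */
	error = cache_resolve(nch, cred); /* forces a definitive state */
	cache_unlock(nch);
	cache_drop(nch);
	return (error);
}
#endif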
static int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs != 0);

	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * WARNING!  If VRECLAIMED is set the vnode could
		 * already be in the middle of a recycle.  Callers
		 * should not assume that nc_vp is usable when
		 * not NULL.  cache_vref() or cache_vget() must be
		 * called.
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	}
	return(EWOULDBLOCK);
}

int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}
static void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * note: the same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	atomic_add_int(&target->mount->mnt_refs, 1);
}

static int
_cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		_cache_lock(ncp);
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

int
cache_get_nonblock(struct nchandle *nch)
{
	int error;

	if ((error = _cache_get_nonblock(nch->ncp)) == 0)
		atomic_add_int(&nch->mount->mnt_refs, 1);
	return (error);
}

static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
		if (mp)
			ncp->nc_namecache_gen = mp->mnt_namecache_gen;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}
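
/*
 * Example (sketch, not compiled): the tail end of a hypothetical
 * VOP_NRESOLVE implementation.  A successful lookup associates the vnode
 * (positive hit); ENOENT records a negative hit so repeated misses do not
 * have to go back to the filesystem.  example_fs_lookup() is hypothetical.
 */
#if 0
static int
example_fs_nresolve(struct nchandle *nch, struct vnode *dvp,
		    struct ucred *cred)
{
	struct vnode *vp;
	int error;

	error = example_fs_lookup(dvp, nch->ncp->nc_name,
				  nch->ncp->nc_nlen, &vp);
	if (error == 0) {
		cache_setvp(nch, vp);		/* positive hit */
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(nch, NULL);		/* negative hit */
	}
	return (error);
}
#endif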
void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
static void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}
/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		return;

	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leaves may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		_cache_setunresolved(ncp);
		return;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL &&
	    ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
		_cache_setunresolved(ncp);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}
/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}
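
/*
 * Example (sketch, not compiled): how a hypothetical rmdir path might
 * destroy the namespace under a victim directory.  CINV_DESTROY makes the
 * resolver fail the name instead of re-resolving through the parent, and
 * CINV_CHILDREN recursively unresolves everything underneath.
 */
#if 0
static void
example_rmdir_inval(struct nchandle *nch)
{
	/* nch->ncp must be locked by the caller */
	cache_inval(nch, CINV_DESTROY | CINV_CHILDREN);
}
#endif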
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_hold(kid);
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			break;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	char *oname;

	_cache_setunresolved(tncp);
	cache_unlink_parent(fncp);
	cache_link_parent(fncp, tncp->nc_parent);
	cache_unlink_parent(tncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;
	if (fncp->nc_flag & NCF_HASHED)
		_cache_rehash(fncp);
	if (tncp->nc_flag & NCF_HASHED)
		_cache_rehash(tncp);
	if (oname)
		kfree(oname, M_VFSCACHE);
}
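
/*
 * Example (sketch, not compiled): rename-over in a hypothetical
 * VOP_NRENAME implementation.  Both handles arrive locked; on success the
 * source ncp is relinked in the target's place, preserving outstanding
 * references to the source.  example_fs_rename_inodes() is hypothetical.
 */
#if 0
static int
example_fs_nrename(struct nchandle *fnch, struct nchandle *tnch,
		   struct ucred *cred)
{
	int error;

	error = example_fs_rename_inodes(fnch->ncp, tnch->ncp, cred);
	if (error == 0)
		cache_rename(fnch, tnch);	/* fncp takes tncp's place */
	return (error);
}
#endif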
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with are namecache zaps.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			kprintf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
			_cache_lock(ncp);
			_cache_setunresolved(ncp);
			_cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
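
/*
 * Example (sketch, not compiled): turning an nchandle into a locked,
 * referenced vnode.  ENOENT from cache_vget() means the entry resolved
 * to a negative hit; there is no vnode to return in that case.
 */
#if 0
static int
example_open_from_nch(struct nchandle *nch, struct ucred *cred,
		      struct vnode **vpp)
{
	int error;

	error = cache_vget(nch, cred, LK_EXCLUSIVE, vpp);
	if (error == ENOENT) {
		/* negative hit: the name does not exist */
	}
	return (error);
}
#endif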
/*
 * Similar to cache_vget() but only acquires a ref on the vnode.
 */
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if ((error = vget(vp, LK_SHARED)) != 0) {
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				_cache_unlock(ncp);
				goto again;
			}
			/* fatal error */
		} else {
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.  Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.
 *
 * However, we might race against the parent dvp and not be able to
 * reference it.  If we race, return NULL.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL) {
				if (vget(dvp, LK_SHARED) == 0) {
					vn_unlock(dvp);
					/* return referenced, unlocked dvp */
				} else {
					dvp = NULL;
				}
			}
		}
	}
	return(dvp);
}
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		kprintf("cache_fromdvp: forcing %s\n", nch->ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((nch->ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	if (fakename)
		kfree(fakename, M_TEMP);
force:
	/*
	 * hold it for real so the mount gets a ref
	 */
	if (nch->ncp)
		cache_hold(nch);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (ENOENT);
}
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	char *fakename;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			vrele(pvp);
			break;
		}
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned. (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vref(nch, cred, &pvp)) != 0)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
					       "MATCHED inode %lld path %s/%*.*s\n",
					       (long long)vat.va_fileid,
					       nch->ncp->nc_name,
					       den->d_namlen, den->d_namlen,
					       den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = _cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		/* _cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			kfree(ncp->nc_name, M_VFSCACHE);
		kfree(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			_cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		_cache_lock(ncp);
	}
done:
	_cache_unlock(ncp);
	atomic_subtract_int(&ncp->nc_refs, 1);
}
static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (_cache_get_nonblock(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_get(ncp);	/* cycle through lock */
			_cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * kmalloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  If nlc_namelen is 0 nc_name will
	 * be NULL.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par_nch->ncp);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
}
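
/*
 * Example (sketch, not compiled): one component of a hypothetical path
 * walk.  cache_nlookup() always returns a locked, referenced entry; an
 * unresolved entry must be pushed through cache_resolve() before its
 * nc_vp/nc_error can be trusted.
 */
#if 0
static int
example_lookup_component(struct nchandle *par, struct nlcomponent *nlc,
			 struct ucred *cred, struct nchandle *res)
{
	int error;

	*res = cache_nlookup(par, nlc);		/* never NULL */
	if (res->ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(res, cred);
	else
		error = res->ncp->nc_error;
	if (error && error != ENOENT) {
		cache_put(res);		/* unlock + drop on hard error */
		cache_zero(res);
	}
	return (error);
}
#endif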
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
	    info->result = mp;
	    return(-1);
	}
	return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}
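
/*
 * Example (sketch, not compiled): descending through a mount point during
 * a path walk.  If an entry is flagged NCF_ISMOUNTPT a visible mount may
 * be stacked on it; the walk replaces the handle with the mount's root.
 */
#if 0
static void
example_cross_mount(struct nchandle *nch)
{
	struct mount *mp;

	while ((nch->ncp->nc_flag & NCF_ISMOUNTPT) &&
	       (mp = cache_findmount(nch)) != NULL) {
		cache_drop(nch);
		cache_copy(&mp->mnt_ncmountpt, nch);
	}
}
#endif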
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * is returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroys the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does it should not have to go
	 * back too many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
				    par->nc_nlen, par->nc_nlen, par->nc_name,
				    par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}
static void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (_cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}
/*
 * Rehash an ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
_cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 5 * hz;
}
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	atomic_add_int(&mp->mnt_refs, 1);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}
/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 * If the caller intends to save the returned namecache pointer somewhere
 * it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}
/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.   The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
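
/*
 * Example (sketch, not compiled): a hypothetical VOP_RECLAIM implementation
 * using cache_purge() as the catch-all invalidation before the vnode is
 * reused by the system.
 */
#if 0
static int
example_fs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;

	cache_purge(vp);		/* drop name cache linkages */
	vp->v_data = NULL;		/* detach hypothetical fs-private data */
	return (0);
}
#endif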
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				cache_zap(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}
#endif
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	get_mplock();
	bp = kern_getcwd(buf, buflen, &error);
	rel_mplock();
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}
char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	while (nch.ncp && (nch.ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);
int
cache_fullpath(struct proc *p, struct nchandle *nchp, char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct nchandle fd_nrdir;
	struct nchandle nch;

	++numfullpathcalls;
	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;

	while (nch.ncp &&
	       (nch.ncp != fd_nrdir.ncp || nch.mount != fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numfullpathfail2++;
		kfree(buf, M_TEMP);
		return(ENOENT);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	return(0);
}
int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;
	struct nchandle nch;

	if (disablefullpath)
		return (ENODEV);
	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	return(cache_fullpath(p, &nch, retbuf, freebuf));
}