/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/proc.h>		/* curthread, thread_t */
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>

#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAX_RECURSION_DEPTH	64
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  ncneglist is locked
 * with a global spinlock (ncspin).
 *
 * MPSAFE RULES:
 *
 * (1) A ncp must be referenced before it can be locked.
 *
 * (2) A ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked-up (e.g. NFS) node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) parent linkages require both the parent and child to be locked.
 */
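/*
 * Illustrative sketch (not part of the original file): the child -> parent
 * lock order in rule (3) means a caller wanting both ends of a linkage
 * locks the child first.  A hypothetical helper, assuming only the
 * internal _cache_* primitives defined below:
 */
#if 0
static void
example_lock_child_then_parent(struct namecache *child)
{
	struct namecache *par;

	_cache_hold(child);			/* rule (1): ref before lock */
	_cache_lock(child);			/* rule (2): lock to modify */
	if ((par = child->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);		/* rules (3)/(4): parent last */
		/* ... modify the parent linkage ... */
		_cache_put(par);		/* unlock + drop */
	}
	_cache_put(child);
}
#endif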
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024
#define MINPOS			1024
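/*
 * Illustrative sketch (mirrors the lookup code later in this file): a hash
 * chain is selected by folding the component name and then the parent ncp
 * pointer through FNV-1, exactly as cache_nlookup() does:
 */
#if 0
	struct nchash_head *nchpp;
	u_int32_t hash;

	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	nchpp = NCHHASH(hash);		/* chain guarded by nchpp->spin */
#endif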
MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

LIST_HEAD(nchash_list, namecache);

struct nchash_head {
	struct nchash_list list;
	struct spinlock	   spin;
};

static struct nchash_head	*nchashtbl;
static struct namecache_list	ncneglist;
static struct spinlock		ncspin;
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 *	0	Only errors are reported
 *	1	Successes are reported
 *	2	Successes + the whole directory scan is reported
 *	3	Force the directory scan code to run as if the parent vnode
 *		did not have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static int	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static int	numdefered;		/* number of deferred zaps pending */
SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0, "");

static int	ncposlimit;		/* limit on number of positive entries */
SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

int cache_mpsafe = 1;
SYSCTL_INT(_vfs, OID_AUTO, cache_mpsafe, CTLFLAG_RW, &cache_mpsafe, 0, "");

static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(int count);
static void _cache_cleanpos(int count);
static void _cache_cleandefered(void);
/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
#define STATNODE_INT(mode, name, var) \
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
static int numneg; STATNODE_INT(CTLFLAG_RD, numneg, &numneg);
static int numcache; STATNODE_INT(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}
	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
  0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
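/*
 * Illustrative user-land consumer (an assumption, not part of this file):
 * the handler above emits one struct nchstats per cpu back-to-back, so a
 * monitoring tool can fetch and aggregate them roughly like this:
 */
#if 0
	size_t len;
	struct nchstats *stats;

	if (sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0) == 0 &&
	    (stats = malloc(len)) != NULL &&
	    sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0) == 0) {
		int n = len / sizeof(*stats);	/* one entry per cpu */
		/* sum stats[0..n-1] fields for a system-wide view */
	}
#endif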
static struct namecache *cache_zap(struct namecache *ncp, int nonblock);

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * The primary lock field is nc_exlocks.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 */
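/*
 * Illustrative sketch (an assumption): the safe pattern the warning above
 * describes.  cache_get() returns with a usable vp or an unresolved ncp,
 * whereas a bare cache_lock() may be holding a vnode that has already
 * begun recycling.  'cred' is assumed to be the caller's ucred:
 */
#if 0
	struct nchandle nch;	/* already referenced */

	cache_get(&nch, &nch);			/* ref+lock, revalidates vp */
	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
		cache_resolve(&nch, cred);	/* re-resolve if required */
	/* ... use nch.ncp->nc_vp ... */
	cache_put(&nch);			/* unlock + drop */
#endif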
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	u_int count;
	int error;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE!  vhold() is allowed if we hold a
				 *	  lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1))
				break;
			continue;
		}
		tsleep_interlock(ncp, 0);
		if (atomic_cmpset_int(&ncp->nc_exlocks, count,
				      count | NC_EXLOCK_REQ) == 0) {
			continue;
		}
		error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
		if (error == EWOULDBLOCK) {
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock: blocked "
					"on %p", ncp);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
			"%d secs\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}
/*
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;
	u_int count;

	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE!  vhold() is allowed if we hold a
				 *	  lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1))
				break;
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}
/*
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 *	 nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
 */
static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;
	u_int count;

	KKASSERT(ncp->nc_refs >= 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);

	count = ncp->nc_exlocks;
	if ((count & ~NC_EXLOCK_REQ) == 1) {
		ncp->nc_locktd = NULL;
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
	}
	for (;;) {
		if ((count & ~NC_EXLOCK_REQ) == 1) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
				if (count & NC_EXLOCK_REQ)
					wakeup(ncp);
				break;
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count - 1)) {
				break;
			}
		}
		count = ncp->nc_exlocks;
	}
}
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't hold one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}
/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	int refs;

	while (ncp) {
		KKASSERT(ncp->nc_refs > 0);
		refs = ncp->nc_refs;

		if (refs == 1) {
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
				    TAILQ_EMPTY(&ncp->nc_list)) {
					ncp = cache_zap(ncp, 1);
					continue;
				}
				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
					_cache_unlock(ncp);
					break;
				}
				_cache_unlock(ncp);
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
				break;
		}
	}
}
/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is likely held during this call, we
 *	 can't do anything fancy.
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;

	/*
	 * Set inheritance flags.  Note that the parent flags may be
	 * stale due to getattr potentially not having been run yet
	 * (it gets run during nlookup()'s).
	 */
	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
		ncp->nc_flag |= NCF_SF_PNOCACHE;
	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
		ncp->nc_flag |= NCF_UF_PCACHE;

	LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);

	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}
/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dropvp;

	if ((par = ncp->nc_parent) != NULL) {
		KKASSERT(ncp->nc_parent == par);
		_cache_hold(par);
		spin_lock(&ncp->nc_head->spin);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		dropvp = NULL;
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		spin_unlock(&ncp->nc_head->spin);
		ncp->nc_parent = NULL;
		ncp->nc_head = NULL;
		_cache_drop(par);

		/*
		 * We can only safely vdrop with no spinlocks held.
		 */
		if (dropvp)
			vdrop(dropvp);
	}
}
/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}

/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
	return(nch);
}
/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	if (target->ncp)
		_cache_hold(target->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	nch->mount = mp;
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

void
cache_drop(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}
/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, just ping pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
	     struct nchandle *nch2, struct ucred *cred2)
{
	int which;

	which = 0;

	for (;;) {
		if (which == 0) {
			if (cache_lock_nonblock(nch1) == 0) {
				cache_resolve(nch1, cred1);
				break;
			}
			cache_unlock(nch2);
			cache_lock(nch1);
			cache_resolve(nch1, cred1);
			which = 1;
		} else {
			if (cache_lock_nonblock(nch2) == 0) {
				cache_resolve(nch2, cred2);
				break;
			}
			cache_unlock(nch1);
			cache_lock(nch2);
			cache_resolve(nch2, cred2);
			which = 0;
		}
	}
}

int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}
/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp);
			return(0);
		}
		_cache_unlock(ncp);
	}
	return(EWOULDBLOCK);
}

/*
 * NOTE: The same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	atomic_add_int(&target->mount->mnt_refs, 1);
}

static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);

	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock(&vp->v_spinlock);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		spin_unlock(&vp->v_spinlock);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		atomic_add_int(&numcache, 1);
		ncp->nc_error = 0;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		ncp->nc_vp = NULL;
		spin_lock(&ncspin);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		spin_unlock(&ncspin);
		ncp->nc_error = ENOENT;
		if (mp)
			ncp->nc_namecache_gen = mp->mnt_namecache_gen;
	}
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}
void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}

void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			atomic_add_int(&numcache, -1);
			spin_lock(&vp->v_spinlock);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			spin_unlock(&vp->v_spinlock);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			spin_lock(&ncspin);
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
			spin_unlock(&ncspin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}
/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		return;

	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leafs may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		_cache_setunresolved(ncp);
		return;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL &&
	    ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
		_cache_setunresolved(ncp);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}
/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */
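/*
 * Illustrative sketch (an assumption): an rmdir-style caller, holding the
 * locked nchandle for the doomed directory, would typically invalidate
 * both the node and its subtree:
 */
#if 0
	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
#endif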
struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}
/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;
	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		_cache_hold(kid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spinlock);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock(&vp->v_spinlock);
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spinlock);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock(&vp->v_spinlock);
done:
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *tncp_par;
	struct nchash_head *nchpp;
	u_int32_t hash;
	char *oname;

	/*
	 * Rename fncp (unlink)
	 */
	_cache_unlink_parent(fncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp_par = tncp->nc_parent;
	_cache_hold(tncp_par);
	_cache_lock(tncp_par);

	/*
	 * Rename fncp (relink)
	 */
	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
	nchpp = NCHHASH(hash);

	spin_lock(&nchpp->spin);
	_cache_link_parent(fncp, tncp_par, nchpp);
	spin_unlock(&nchpp->spin);

	_cache_put(tncp_par);

	/*
	 * Get rid of the overwritten tncp (unlink)
	 */
	_cache_setunresolved(tncp);
	_cache_unlink_parent(tncp);
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;

	if (oname)
		kfree(oname, M_VFSCACHE);
}
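/*
 * Illustrative sketch (an assumption): a VFS rename implementation, with
 * both handles locked via nlookup (the nl_nch fields below are presumed
 * nlookupdata handles), would finish by relinking the namecache:
 */
#if 0
	cache_rename(&fromnd->nl_nch, &tond->nl_nch);
#endif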
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.
 * The most typical error is ENOENT, meaning that the ncp represents a
 * negative cache hit and there is no vnode to retrieve, but other errors
 * can occur too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 */
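/*
 * Illustrative sketch (an assumption): a caller resolving a locked
 * nchandle to a usable, locked vnode.  Per the warning above, the ncp
 * lock is deliberately retained across the vget.  'cred' is assumed to
 * be the caller's ucred:
 */
#if 0
	struct vnode *vp;
	int error;

	error = cache_vget(&nch, cred, LK_SHARED, &vp);
	if (error == 0) {
		/* ... use vp ... */
		vput(vp);		/* unlock + release when done */
	}
#endif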
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vref on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
/*
 * Return a referenced vnode representing the parent directory of
 * the passed ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
 *	    lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is
 * returned.  If 'makeit' is 1 we attempt to track-down and create the
 * namecache topology for dvp.  This will fail only if the directory has
 * been deleted out from under the caller.
 *
 * Callers must always check for a NULL return no matter the value
 * of 'makeit'.
 *
 * To avoid overflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
				  struct vnode **saved_dvp);
int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Handle the makeit == 0 degenerate case
	 */
	if (makeit == 0) {
		spin_lock(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp)
			cache_hold(nch);
		spin_unlock(&dvp->v_spinlock);
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while (makeit) {
		/*
		 * Break out if we successfully acquire a working ncp.
		 */
		spin_lock(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp) {
			cache_hold(nch);
			spin_unlock(&dvp->v_spinlock);
			break;
		}
		spin_unlock(&dvp->v_spinlock);

		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	/*
	 * If nch->ncp is non-NULL it will have been held already.
	 */
	if (fakename)
		kfree(fakename, M_TEMP);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	char *fakename;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		spin_lock(&pvp->v_spinlock);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			spin_unlock(&pvp->v_spinlock);
			vrele(dvp);
			break;
		}
		spin_unlock(&pvp->v_spinlock);
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(dvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(pvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged opportunistically,
 * or (2) Make the NFS server backtrack and scan to recover a connected
 * namecache topology in order to then be able to issue new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 * This case can occur in the cache_drop() path.
 *
 * This function may return a held (but NOT locked) parent node which the
 * caller must drop.  We do this so _cache_drop() can loop, to avoid
 * blowing out the kernel stack.
 *
 * WARNING!  For MPSAFE operation this routine must acquire up to three
 *	     spin locks to be able to safely test nc_refs.  Lock order is
 *	     very important.
 *
 *	     hash spinlock if on hash list
 *	     parent spinlock if child of parent
 *	     (the ncp is unresolved so there is no vnode association)
 */
static struct namecache *
cache_zap(struct namecache *ncp, int nonblock)
{
	struct namecache *par;
	struct vnode *dropvp;
	int refs;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 *
	 * Note that once the spinlocks are acquired if nc_refs == 1 no
	 * other references are possible.  If it isn't, however, we have
	 * to decrement but also be sure to avoid a 1->0 transition.
	 */
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(ncp->nc_refs > 0);

	/*
	 * Acquire locks.  Note that the parent can't go away while we hold
	 * a child locked.
	 */
	if ((par = ncp->nc_parent) != NULL) {
		if (nonblock) {
			for (;;) {
				if (_cache_lock_nonblock(par) == 0)
					break;
				refs = ncp->nc_refs;
				ncp->nc_flag |= NCF_DEFEREDZAP;
				++numdefered;	/* MP race ok */
				if (atomic_cmpset_int(&ncp->nc_refs,
						      refs, refs - 1)) {
					_cache_unlock(ncp);
					return(NULL);
				}
			}
			_cache_hold(par);
		} else {
			_cache_hold(par);
			_cache_lock(par);
		}
		spin_lock(&ncp->nc_head->spin);
	}

	/*
	 * If someone other than us has a ref or we have children
	 * we cannot zap the entry.  The 1->0 transition and any
	 * further list operation is protected by the spinlocks
	 * we have acquired but other transitions are not.
	 */
	for (;;) {
		refs = ncp->nc_refs;
		if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
			break;
		if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
			if (par) {
				spin_unlock(&ncp->nc_head->spin);
				_cache_put(par);
			}
			_cache_unlock(ncp);
			return(NULL);
		}
	}

	/*
	 * We are the only ref and with the spinlocks held no further
	 * refs can be acquired by others.
	 *
	 * Remove us from the hash list and parent list.  We have to
	 * drop a ref on the parent's vp if the parent's list becomes
	 * empty.
	 */
	dropvp = NULL;
	if (par) {
		struct nchash_head *nchpp = ncp->nc_head;

		KKASSERT(nchpp != NULL);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		ncp->nc_head = NULL;
		ncp->nc_parent = NULL;
		spin_unlock(&nchpp->spin);
		_cache_unlock(par);
	} else {
		KKASSERT(ncp->nc_head == NULL);
	}

	/*
	 * ncp should not have picked up any refs.  Physically
	 * destroy the ncp.
	 */
	KKASSERT(ncp->nc_refs == 1);
	/* _cache_unlock(ncp) not required */
	ncp->nc_refs = -1;	/* safety */
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);

	/*
	 * Delayed drop (we had to release our spinlocks)
	 *
	 * The refed parent (if not NULL) must be dropped.  The
	 * caller is responsible for looping.
	 */
	if (dropvp)
		vdrop(dropvp);
	return(par);
}
/*
 * Clean up dangling negative cache and deferred-drop entries in the
 * hash table.
 */
typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;

static cache_hs_t neg_cache_hysteresis_state = CHI_LOW;
static cache_hs_t pos_cache_hysteresis_state = CHI_LOW;
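/*
 * Worked example of the negative-entry trigger below: with the default
 * ncnegfactor of 16 and, say, numcache == 160000, cleaning kicks in once
 * numneg exceeds MINNEG and numneg * 16 > 160000, i.e. numneg > 10000.
 * The CHI_HIGH state then keeps cleaning down to the 9/10 thresholds
 * before dropping back to CHI_LOW, avoiding flapping on the critical path.
 */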
void
cache_hysteresis(void)
{
	int poslimit;

	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(neg_cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			_cache_cleanneg(10);
			neg_cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			_cache_cleanneg(10);
		} else {
			neg_cache_hysteresis_state = CHI_LOW;
		}
		break;
	}

	/*
	 * Don't cache too many positive hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 *
	 * Excessive positive hits can accumulate due to large numbers of
	 * hardlinks (the vnode cache will not prevent hl ncps from growing
	 * into infinity).
	 */
	if ((poslimit = ncposlimit) == 0)
		poslimit = desiredvnodes * 2;

	switch(pos_cache_hysteresis_state) {
	case CHI_LOW:
		if (numcache > poslimit && numcache > MINPOS) {
			_cache_cleanpos(10);
			pos_cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numcache > poslimit * 5 / 6 && numcache > MINPOS) {
			_cache_cleanpos(10);
		} else {
			pos_cache_hysteresis_state = CHI_LOW;
		}
		break;
	}

	/*
	 * Clean out dangling deferred-zap ncps which could not
	 * be cleanly dropped if too many build up.  Note
	 * that numdefered is not an exact number as such ncps
	 * can be reused and the counter is not handled in a MP
	 * safe manner by design.
	 */
	if (numdefered * ncnegfactor > numcache) {
		_cache_cleandefered();
	}
}
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence why the passed par_nch must be unlocked).  Locking
 * rules are to order for parent traversals, not for child traversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
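/*
 * Illustrative sketch (an assumption): looking up one path component under
 * a referenced, unlocked parent handle and releasing the result.  The
 * component name and 'cred' are hypothetical caller state:
 */
#if 0
	struct nlcomponent nlc;
	struct nchandle nch;

	nlc.nlc_nameptr = "example";		/* hypothetical component */
	nlc.nlc_namelen = 7;
	nch = cache_nlookup(&par_nch, &nlc);	/* returns refd + locked */
	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
		cache_resolve(&nch, cred);	/* resolve through the fs */
	cache_put(&nch);			/* unlock + drop */
#endif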
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller or by us.
	 */
	cache_hysteresis();

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock(&nchpp->spin);
	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_get(ncp);
			_cache_put(ncp);
			_cache_drop(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		_cache_lock(par_nch->ncp);
		par_locked = 1;
		goto restart;
	}

	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
	 */
	ncp = new_ncp;
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
}
2349 * This is a non-blocking verison of cache_nlookup() used by
2350 * nfs_readdirplusrpc_uio(). It can fail for any reason and
2351 * will return nch.ncp == NULL in that case.
2354 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
2356 struct nchandle nch;
2357 struct namecache *ncp;
2358 struct namecache *new_ncp;
2359 struct nchash_head *nchpp;
2367 mp = par_nch->mount;
2371 * Try to locate an existing entry
2373 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2374 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2376 nchpp = NCHHASH(hash);
2378 spin_lock(&nchpp->spin);
2379 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2383 * Break out if we find a matching entry. Note that
2384 * UNRESOLVED entries may match, but DESTROYED entries
2387 if (ncp->nc_parent == par_nch->ncp &&
2388 ncp->nc_nlen == nlc->nlc_namelen &&
2389 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2390 (ncp->nc_flag & NCF_DESTROYED) == 0
2393 spin_unlock(&nchpp->spin);
2395 _cache_unlock(par_nch->ncp);
2398 if (_cache_lock_special(ncp) == 0) {
2399 _cache_auto_unresolve(mp, ncp);
2401 _cache_free(new_ncp);
2412 * We failed to locate an entry, create a new entry and add it to
2413 * the cache. The parent ncp must also be locked so we
2416 * We have to relookup after possibly blocking in kmalloc or
2417 * when locking par_nch.
2419 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2420 * mount case, in which case nc_name will be NULL.
2422 if (new_ncp == NULL) {
2423 spin_unlock(&nchpp->spin);
2424 new_ncp = cache_alloc(nlc->nlc_namelen);
2425 if (nlc->nlc_namelen) {
2426 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2428 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2432 if (par_locked == 0) {
2433 spin_unlock(&nchpp->spin);
2434 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
2442 * WARNING! We still hold the spinlock. We have to set the hash
2443 * table entry atomically.
2446 _cache_link_parent(ncp, par_nch->ncp, nchpp);
2447 spin_unlock(&nchpp->spin);
2448 _cache_unlock(par_nch->ncp);
2449 /* par_locked = 0 - not used */
2452 * stats and namecache size management
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
failed:
	if (new_ncp)
		_cache_free(new_ncp);
	nch.mount = NULL;
	nch.ncp = NULL;
	return(nch);
}
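
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): non-blocking callers must check for the nch.ncp == NULL
 * failure case documented above and simply skip the entry.
 */
#if 0
static void
example_nlookup_nonblock(struct nchandle *par, struct nlcomponent *nlc)
{
	struct nchandle nch;

	nch = cache_nlookup_nonblock(par, nlc);
	if (nch.ncp == NULL)
		return;		/* lookup failed or would have blocked */
	/* ... use the locked, referenced entry ... */
	cache_put(&nch);
}
#endif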
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};
static int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		return(-1);		/* terminate the scan */
	}
	return(0);
}
struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}
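
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): path lookup code can use cache_findmount() to hop from a
 * covered directory onto the filesystem mounted on top of it.
 */
#if 0
static void
example_cross_mount(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = cache_findmount(nch)) != NULL) {
		cache_drop(nch);
		cache_copy(&mp->mnt_ncmountpt, nch); /* referenced copy */
	}
}
#endif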
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of being recycled when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}
	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));
	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}
	/*
	 * The vp's of the parent directories in the chain are held via
	 * vhold() due to the existence of the child, and should not
	 * disappear.  However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace,
	 *	  destroying the namespace for entire directories quite
	 *	  often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED).
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, and if it does it should not have to go
	 * back too many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);
		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			return (EXDEV);
		}
2623 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
2624 par->nc_nlen, par->nc_nlen, par->nc_name);
2626 * The parent is not set in stone, ref and lock it to prevent
2627 * it from disappearing. Also note that due to renames it
2628 * is possible for our ncp to move and for par to no longer
2629 * be one of its parents. We resolve it anyway, the loop
2630 * will handle any moves.
2632 _cache_get(par); /* additional hold/lock */
2633 _cache_put(par); /* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen,
					par->nc_name, par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}
	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 *	 ncp must already be resolved.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
	} else {
		ncp->nc_error = EPERM;
	}
	vrele(dvp);
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
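
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): per the comment above cache_resolve(), ENOENT may simply be
 * a successful negative cache hit rather than a hard failure.
 */
#if 0
	error = cache_resolve(&nch, cred);
	if (error == ENOENT) {
		/* the name is cached as known-not-to-exist */
	}
#endif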
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's mac-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}
/*
 * Clean out negative cache entries when too many have accumulated.
 */
static void
_cache_cleanneg(int count)
{
	struct namecache *ncp;
	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		spin_lock(&ncspin);
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			spin_unlock(&ncspin);
			break;
		}
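		/*
		 * Rotate the entry to the tail of the list so successive
		 * calls cycle through all of the negative entries instead
		 * of hammering on the same one.
		 */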
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		_cache_hold(ncp);
		spin_unlock(&ncspin);
		if (_cache_lock_special(ncp) == 0) {
			ncp = cache_zap(ncp, 1);
			if (ncp)
				_cache_drop(ncp);
		} else {
			_cache_drop(ncp);
		}
		--count;
	}
}
/*
 * Clean out positive cache entries when too many have accumulated.
 */
static void
_cache_cleanpos(int count)
{
	static volatile int rover;
	struct nchash_head *nchpp;
	struct namecache *ncp;
	int rover_copy;
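
	/*
	 * The rover pseudo-randomly walks the hash chains, one bucket
	 * per iteration.  A local copy of the incremented value is taken
	 * because the global is deliberately updated without a lock
	 * ("MPSAFEENOUGH" below).
	 */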
	/*
	 * Attempt to clean out the specified number of positive cache
	 * entries.
	 */
	while (count) {
		rover_copy = ++rover;	/* MPSAFEENOUGH */
		nchpp = NCHHASH(rover_copy);
		spin_lock(&nchpp->spin);
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		spin_unlock(&nchpp->spin);

		if (ncp) {
			if (_cache_lock_special(ncp) == 0) {
				ncp = cache_zap(ncp, 1);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
		}
		--count;
	}
}
/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED;
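
	/*
	 * The DESTROYED dummy entry is used as a placemarker.  It can be
	 * safely left in a hash chain across the spinlock release below,
	 * and iteration then resumes from the marker, because normal
	 * lookups skip DESTROYED entries.
	 */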
	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];

		spin_lock(&nchpp->spin);
		LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			LIST_REMOVE(&dummy, nc_hash);
			LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock(&nchpp->spin);
			ncp = &dummy;
		}
		LIST_REMOVE(&dummy, nc_hash);
		spin_unlock(&nchpp->spin);
	}
}
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;
	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	spin_init(&ncspin);
	nchashtbl = hashinit_ext(desiredvnodes / 2,
				 sizeof(struct nchash_head),
				 M_VFSCACHE, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		LIST_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin);
	}
	nclockwarn = 5 * hz;
}
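
/*
 * Note: hashinit_ext() is assumed to size the table to a power of two
 * and return size - 1 in nchash, which is why the loop above and the
 * NCHHASH() users treat nchash as an inclusive mask rather than a
 * count.
 */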
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	atomic_add_int(&mp->mnt_refs, 1);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}
/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}
/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence
 * of the children.  The namecache topology is left intact even if we do
 * not know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
#if 0
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchash_head *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		spin_lock_wr(&nchpp->spin);	/* XXX */
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				ncp = cache_zap(ncp, 0);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
		spin_unlock_wr(&nchpp->spin);	/* XXX */
	}
}
#endif
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	get_mplock();
	bp = kern_getcwd(buf, buflen, &error);
	rel_mplock();
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}
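
/*
 * Note: kern_getcwd() builds the path from the end of the buffer
 * backwards, so the returned pointer 'bp' generally points into the
 * middle of 'buf'; the copyout above therefore starts at 'bp' rather
 * than 'buf'.
 */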
char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}
		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;
		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		if (nch.ncp == NULL) {
			numcwdfail2++;
			*error = ENOENT;
			bp = NULL;
			goto done;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}
/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	   &disablefullpath, 0, "");
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);
int
cache_fullpath(struct proc *p, struct nchandle *nchp,
	       char **retbuf, char **freebuf, int guess)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	char *bp, *buf;
	int slash_prefixed;
	int error = 0;
	int i;

	atomic_add_int(&numfullpathcalls, -1);

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fd_nrdir = p->p_fd->fd_nrdir;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		new_mp = NULL;

		/*
		 * If we are asked to guess the upwards path, we do so
		 * whenever we encounter an ncp marked as a mountpoint.
		 * We locate the actual mountpoint by searching the mount
		 * list for the mount whose mounted-on ncp is this one.
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
			new_mp = mount_get_by_nc(ncp);
		}
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (ncp == mp->mnt_ncmountpt.ncp) {
			new_mp = mp;
		}
		if (new_mp) {
			nch = new_mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}
		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;
		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held
		 * locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		if (nch.ncp == NULL) {
			numfullpathfail2++;
			kfree(buf, M_TEMP);
			error = ENOENT;
			goto done;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return(error);
}
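
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): on success the caller owns *freebuf and must kfree() it,
 * while *retbuf points into that same buffer.
 */
#if 0
static void
example_fullpath(struct proc *p, struct nchandle *nch)
{
	char *path, *freebuf;

	if (cache_fullpath(p, nch, &path, &freebuf, 0) == 0) {
		kprintf("path = %s\n", path);
		kfree(freebuf, M_TEMP);
	}
}
#endif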
int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
	    char **freebuf, int guess)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	atomic_add_int(&numfullpathcalls, 1);
	if (disablefullpath)
		return (ENODEV);
	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock(&vn->v_spinlock);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock(&vn->v_spinlock);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock(&vn->v_spinlock);
	atomic_add_int(&numfullpathcalls, -1);
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, retbuf, freebuf, guess);
	_cache_drop(ncp);
	return (error);
}