2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/sysctl.h>
69 #include <sys/mount.h>
70 #include <sys/vnode.h>
71 #include <sys/malloc.h>
72 #include <sys/sysproto.h>
73 #include <sys/spinlock.h>
75 #include <sys/namei.h>
76 #include <sys/nlookup.h>
77 #include <sys/filedesc.h>
78 #include <sys/fnv_hash.h>
79 #include <sys/globaldata.h>
80 #include <sys/kern_syscall.h>
81 #include <sys/dirent.h>
84 #include <sys/sysref2.h>
85 #include <sys/spinlock2.h>
86 #include <sys/mplock2.h>
88 #define MAX_RECURSION_DEPTH 64
91 * Random lookups in the cache are accomplished with a hash table using
92 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock.
94 * Negative entries may exist and correspond to resolved namecache
95 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT
96 * will be set if the entry corresponds to a whited-out directory entry
 97 * (versus simply not finding the entry at all). ncneglist is locked
98 * with a global spinlock (ncspin).
102 * (1) A ncp must be referenced before it can be locked.
104 * (2) A ncp must be locked in order to modify it.
106 * (3) ncp locks are always ordered child -> parent. That may seem
107 * backwards but forward scans use the hash table and thus can hold
108 * the parent unlocked when traversing downward.
110 * This allows insert/rename/delete/dot-dot and other operations
111 * to use ncp->nc_parent links.
 113 * This also prevents a locked-up node (e.g. an NFS node) from creating a
114 * chain reaction all the way back to the root vnode / namecache.
116 * (4) parent linkages require both the parent and child to be locked.
 120 * Structures associated with name caching.
122 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
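/*
 * For example, the hash chain for a given (parent, name) pair is selected
 * the same way cache_rename() does it further below: hash the name, fold
 * in the parent pointer, then mask with NCHHASH.  Illustrative sketch only:
 *
 *	struct nchash_head *nchpp;
 *	u_int32_t hash;
 *
 *	hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);
 *	nchpp = NCHHASH(hash);
 *
 *	spin_lock(&nchpp->spin);
 *	... scan nchpp->list, or insert with _cache_link_parent() ...
 *	spin_unlock(&nchpp->spin);
 */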
125 #define NCMOUNT_NUMCACHE 1009 /* prime number */
127 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
129 LIST_HEAD(nchash_list, namecache);
132 struct nchash_list list;
133 struct spinlock spin;
136 struct ncmount_cache {
137 struct spinlock spin;
138 struct namecache *ncp;
140 int isneg; /* if != 0 mp is originator and not target */
143 static struct nchash_head *nchashtbl;
144 static struct namecache_list ncneglist;
145 static struct spinlock ncspin;
146 static struct ncmount_cache ncmount_cache[NCMOUNT_NUMCACHE];
 149 * ncvp_debug - debug cache_fromdvp(). This is used by the NFS server
150 * to create the namecache infrastructure leading to a dangling vnode.
152 * 0 Only errors are reported
153 * 1 Successes are reported
154 * 2 Successes + the whole directory scan is reported
 155 * 3	Force the directory scan code to run as if the parent vnode did not
156 * have a namecache record, even if it does have one.
158 static int ncvp_debug;
159 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
160 "Namecache debug level (0-3)");
162 static u_long nchash; /* size of hash table */
163 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
164 "Size of namecache hash table");
166 static int ncnegflush = 10; /* burst for negative flush */
167 SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
168 "Batch flush negative entries");
170 static int ncposflush = 10; /* burst for positive flush */
171 SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
172 "Batch flush positive entries");
174 static int ncnegfactor = 16; /* ratio of negative entries */
175 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
176 "Ratio of namecache negative entries");
178 static int nclockwarn; /* warn on locked entries in ticks */
179 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
180 "Warn on locked namecache entries in ticks");
 182 static int numdefered;	/* number of deferred zaps */
 183 SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
 184     "Number of deferred namecache zaps");
 186 static int ncposlimit;	/* limit on positive namecache entries */
 187 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
 188     "Limit on the number of positive namecache entries");
190 static int ncp_shared_lock_disable = 0;
191 SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
192 &ncp_shared_lock_disable, 0, "Disable shared namecache locks");
194 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
195 "sizeof(struct vnode)");
196 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
197 "sizeof(struct namecache)");
199 static int ncmount_cache_enable = 1;
200 SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
201 &ncmount_cache_enable, 0, "mount point cache");
202 static long ncmount_cache_hit;
203 SYSCTL_LONG(_debug, OID_AUTO, ncmount_cache_hit, CTLFLAG_RW,
204 &ncmount_cache_hit, 0, "mpcache hits");
205 static long ncmount_cache_miss;
206 SYSCTL_LONG(_debug, OID_AUTO, ncmount_cache_miss, CTLFLAG_RW,
207 &ncmount_cache_miss, 0, "mpcache misses");
208 static long ncmount_cache_overwrite;
209 SYSCTL_LONG(_debug, OID_AUTO, ncmount_cache_overwrite, CTLFLAG_RW,
210 &ncmount_cache_overwrite, 0, "mpcache entry overwrites");
212 static int cache_resolve_mp(struct mount *mp);
213 static struct vnode *cache_dvpref(struct namecache *ncp);
214 static void _cache_lock(struct namecache *ncp);
215 static void _cache_setunresolved(struct namecache *ncp);
216 static void _cache_cleanneg(int count);
217 static void _cache_cleanpos(int count);
218 static void _cache_cleandefered(void);
219 static void _cache_unlink(struct namecache *ncp);
222 * The new name cache statistics
224 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
226 SYSCTL_INT(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
227 "Number of negative namecache entries");
229 SYSCTL_INT(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
 230     "Number of namecache entries");
231 static u_long numcalls;
232 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcalls, CTLFLAG_RD, &numcalls, 0,
233 "Number of namecache lookups");
234 static u_long numchecks;
235 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numchecks, CTLFLAG_RD, &numchecks, 0,
236 "Number of checked entries in namecache lookups");
238 struct nchstats nchstats[SMP_MAXCPU];
240 * Export VFS cache effectiveness statistics to user-land.
242 * The statistics are left for aggregation to user-land so
 243 * neat things can be achieved, like observing per-CPU cache effectiveness.
247 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
249 struct globaldata *gd;
253 for (i = 0; i < ncpus; ++i) {
254 gd = globaldata_find(i);
255 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
256 sizeof(struct nchstats))))
262 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
263 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
265 static struct namecache *cache_zap(struct namecache *ncp, int nonblock);
268 * Namespace locking. The caller must already hold a reference to the
269 * namecache structure in order to lock/unlock it. This function prevents
 270 * the namespace from being created or destroyed by accessors other than
273 * Note that holding a locked namecache structure prevents other threads
274 * from making namespace changes (e.g. deleting or creating), prevents
275 * vnode association state changes by other threads, and prevents the
276 * namecache entry from being resolved or unresolved by other threads.
278 * An exclusive lock owner has full authority to associate/disassociate
279 * vnodes and resolve/unresolve the locked ncp.
281 * A shared lock owner only has authority to acquire the underlying vnode,
284 * The primary lock field is nc_lockstatus. nc_locktd is set after the
285 * fact (when locking) or cleared prior to unlocking.
287 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
288 * or recycled, but it does NOT help you if the vnode had already
289 * initiated a recyclement. If this is important, use cache_get()
 290 *	    rather than cache_lock() (and deal with the differences in the
291 * way the refs counter is handled). Or, alternatively, make an
292 * unconditional call to cache_validate() or cache_resolve()
293 * after cache_lock() returns.
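/*
 * For example, a typical caller follows the ref-then-lock discipline like
 * this (illustrative sketch only; "nch" is assumed to be a valid nchandle
 * and "cred" the caller's credentials):
 *
 *	cache_hold(&nch);			(reference before locking)
 *	cache_lock(&nch);
 *	error = cache_resolve(&nch, cred);	(revalidate, per the WARNING)
 *	...
 *	cache_unlock(&nch);
 *	cache_drop(&nch);
 *
 * Alternatively cache_get()/cache_put() combine the reference and lock
 * steps and handle the recycled-vnode case described above.
 */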
297 _cache_lock(struct namecache *ncp)
305 KKASSERT(ncp->nc_refs != 0);
311 count = ncp->nc_lockstatus;
314 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
315 if (atomic_cmpset_int(&ncp->nc_lockstatus,
318 * The vp associated with a locked ncp must
319 * be held to prevent it from being recycled.
321 * WARNING! If VRECLAIMED is set the vnode
322 * could already be in the middle of a recycle.
323 * Callers must use cache_vref() or
324 * cache_vget() on the locked ncp to
325 * validate the vp or set the cache entry
328 * NOTE! vhold() is allowed if we hold a
329 * lock on the ncp (which we do).
339 if (ncp->nc_locktd == td) {
340 KKASSERT((count & NC_SHLOCK_FLAG) == 0);
341 if (atomic_cmpset_int(&ncp->nc_lockstatus,
348 tsleep_interlock(&ncp->nc_locktd, 0);
349 if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
350 count | NC_EXLOCK_REQ) == 0) {
356 error = tsleep(&ncp->nc_locktd, PINTERLOCKED,
357 "clock", nclockwarn);
358 if (error == EWOULDBLOCK) {
361 kprintf("[diagnostic] cache_lock: "
362 "blocked on %p %08x",
364 kprintf(" \"%*.*s\"\n",
365 ncp->nc_nlen, ncp->nc_nlen,
372 kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
374 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
375 (int)(ticks + (hz / 2) - begticks) / hz);
380 * The shared lock works similarly to the exclusive lock except
381 * nc_locktd is left NULL and we need an interlock (VHOLD) to
382 * prevent vhold() races, since the moment our cmpset_int succeeds
383 * another cpu can come in and get its own shared lock.
385 * A critical section is needed to prevent interruption during the
390 _cache_lock_shared(struct namecache *ncp)
395 u_int optreq = NC_EXLOCK_REQ;
397 KKASSERT(ncp->nc_refs != 0);
401 count = ncp->nc_lockstatus;
404 if ((count & ~NC_SHLOCK_REQ) == 0) {
406 if (atomic_cmpset_int(&ncp->nc_lockstatus,
408 (count + 1) | NC_SHLOCK_FLAG |
411 * The vp associated with a locked ncp must
412 * be held to prevent it from being recycled.
414 * WARNING! If VRECLAIMED is set the vnode
415 * could already be in the middle of a recycle.
416 * Callers must use cache_vref() or
417 * cache_vget() on the locked ncp to
418 * validate the vp or set the cache entry
421 * NOTE! vhold() is allowed if we hold a
422 * lock on the ncp (which we do).
426 atomic_clear_int(&ncp->nc_lockstatus,
437 * If already held shared we can just bump the count, but
438 * only allow this if nobody is trying to get the lock
439 * exclusively. If we are blocking too long ignore excl
440 * requests (which can race/deadlock us).
442 * VHOLD is a bit of a hack. Even though we successfully
443 * added another shared ref, the cpu that got the first
444 * shared ref might not yet have held the vnode.
446 if ((count & (optreq|NC_SHLOCK_FLAG)) == NC_SHLOCK_FLAG) {
447 KKASSERT((count & ~(NC_EXLOCK_REQ |
449 NC_SHLOCK_FLAG)) > 0);
450 if (atomic_cmpset_int(&ncp->nc_lockstatus,
452 while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
458 tsleep_interlock(ncp, 0);
459 if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
460 count | NC_SHLOCK_REQ) == 0) {
464 error = tsleep(ncp, PINTERLOCKED, "clocksh", nclockwarn);
465 if (error == EWOULDBLOCK) {
469 kprintf("[diagnostic] cache_lock_shared: "
470 "blocked on %p %08x",
472 kprintf(" \"%*.*s\"\n",
473 ncp->nc_nlen, ncp->nc_nlen,
480 kprintf("[diagnostic] cache_lock_shared: "
481 "unblocked %*.*s after %d secs\n",
482 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
483 (int)(ticks - didwarn) / hz);
488 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
489 * such as the case where one of its children is locked.
493 _cache_lock_nonblock(struct namecache *ncp)
501 count = ncp->nc_lockstatus;
503 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
504 if (atomic_cmpset_int(&ncp->nc_lockstatus,
507 * The vp associated with a locked ncp must
508 * be held to prevent it from being recycled.
510 * WARNING! If VRECLAIMED is set the vnode
511 * could already be in the middle of a recycle.
512 * Callers must use cache_vref() or
513 * cache_vget() on the locked ncp to
514 * validate the vp or set the cache entry
517 * NOTE! vhold() is allowed if we hold a
518 * lock on the ncp (which we do).
528 if (ncp->nc_locktd == td) {
529 if (atomic_cmpset_int(&ncp->nc_lockstatus,
542 * The shared lock works similarly to the exclusive lock except
543 * nc_locktd is left NULL and we need an interlock (VHOLD) to
544 * prevent vhold() races, since the moment our cmpset_int succeeds
545 * another cpu can come in and get its own shared lock.
547 * A critical section is needed to prevent interruption during the
552 _cache_lock_shared_nonblock(struct namecache *ncp)
557 count = ncp->nc_lockstatus;
559 if ((count & ~NC_SHLOCK_REQ) == 0) {
561 if (atomic_cmpset_int(&ncp->nc_lockstatus,
563 (count + 1) | NC_SHLOCK_FLAG |
566 * The vp associated with a locked ncp must
567 * be held to prevent it from being recycled.
569 * WARNING! If VRECLAIMED is set the vnode
570 * could already be in the middle of a recycle.
571 * Callers must use cache_vref() or
572 * cache_vget() on the locked ncp to
573 * validate the vp or set the cache entry
576 * NOTE! vhold() is allowed if we hold a
577 * lock on the ncp (which we do).
581 atomic_clear_int(&ncp->nc_lockstatus,
592 * If already held shared we can just bump the count, but
593 * only allow this if nobody is trying to get the lock
596 * VHOLD is a bit of a hack. Even though we successfully
597 * added another shared ref, the cpu that got the first
598 * shared ref might not yet have held the vnode.
600 if ((count & (NC_EXLOCK_REQ|NC_SHLOCK_FLAG)) ==
602 KKASSERT((count & ~(NC_EXLOCK_REQ |
604 NC_SHLOCK_FLAG)) > 0);
605 if (atomic_cmpset_int(&ncp->nc_lockstatus,
607 while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
621 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
623 * nc_locktd must be NULLed out prior to nc_lockstatus getting cleared.
627 _cache_unlock(struct namecache *ncp)
629 thread_t td __debugvar = curthread;
632 struct vnode *dropvp;
634 KKASSERT(ncp->nc_refs >= 0);
635 KKASSERT((ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) > 0);
636 KKASSERT((ncp->nc_lockstatus & NC_SHLOCK_FLAG) || ncp->nc_locktd == td);
638 count = ncp->nc_lockstatus;
642 * Clear nc_locktd prior to the atomic op (excl lock only)
644 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1)
645 ncp->nc_locktd = NULL;
650 ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ|NC_SHLOCK_FLAG)) == 1) {
652 if (count & NC_EXLOCK_REQ)
653 ncount = count & NC_SHLOCK_REQ; /* cnt->0 */
657 if (atomic_cmpset_int(&ncp->nc_lockstatus,
659 if (count & NC_EXLOCK_REQ)
660 wakeup(&ncp->nc_locktd);
661 else if (count & NC_SHLOCK_REQ)
667 KKASSERT((count & NC_SHLOCK_VHOLD) == 0);
668 KKASSERT((count & ~(NC_EXLOCK_REQ |
670 NC_SHLOCK_FLAG)) > 1);
671 if (atomic_cmpset_int(&ncp->nc_lockstatus,
676 count = ncp->nc_lockstatus;
681 * Don't actually drop the vp until we successfully clean out
682 * the lock, otherwise we may race another shared lock.
690 _cache_lockstatus(struct namecache *ncp)
692 if (ncp->nc_locktd == curthread)
693 return(LK_EXCLUSIVE);
694 if (ncp->nc_lockstatus & NC_SHLOCK_FLAG)
700 * cache_hold() and cache_drop() prevent the premature deletion of a
701 * namecache entry but do not prevent operations (such as zapping) on
702 * that namecache entry.
704 * This routine may only be called from outside this source module if
705 * nc_refs is already at least 1.
707 * This is a rare case where callers are allowed to hold a spinlock,
 708 * so we cannot acquire one ourselves.
712 _cache_hold(struct namecache *ncp)
714 atomic_add_int(&ncp->nc_refs, 1);
719 * Drop a cache entry, taking care to deal with races.
721 * For potential 1->0 transitions we must hold the ncp lock to safely
722 * test its flags. An unresolved entry with no children must be zapped
725 * The call to cache_zap() itself will handle all remaining races and
726 * will decrement the ncp's refs regardless. If we are resolved or
727 * have children nc_refs can safely be dropped to 0 without having to
730 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
732 * NOTE: cache_zap() may return a non-NULL referenced parent which must
733 * be dropped in a loop.
737 _cache_drop(struct namecache *ncp)
742 KKASSERT(ncp->nc_refs > 0);
746 if (_cache_lock_nonblock(ncp) == 0) {
747 ncp->nc_flag &= ~NCF_DEFEREDZAP;
748 if ((ncp->nc_flag & NCF_UNRESOLVED) &&
749 TAILQ_EMPTY(&ncp->nc_list)) {
750 ncp = cache_zap(ncp, 1);
753 if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
760 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
768 * Link a new namecache entry to its parent and to the hash table. Be
769 * careful to avoid races if vhold() blocks in the future.
771 * Both ncp and par must be referenced and locked.
773 * NOTE: The hash table spinlock is held during this call, we can't do
777 _cache_link_parent(struct namecache *ncp, struct namecache *par,
778 struct nchash_head *nchpp)
780 KKASSERT(ncp->nc_parent == NULL);
781 ncp->nc_parent = par;
782 ncp->nc_head = nchpp;
785 * Set inheritance flags. Note that the parent flags may be
786 * stale due to getattr potentially not having been run yet
787 * (it gets run during nlookup()'s).
789 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
790 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
791 ncp->nc_flag |= NCF_SF_PNOCACHE;
792 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
793 ncp->nc_flag |= NCF_UF_PCACHE;
795 LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
797 if (TAILQ_EMPTY(&par->nc_list)) {
798 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
800 * Any vp associated with an ncp which has children must
801 * be held to prevent it from being recycled.
806 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
811 * Remove the parent and hash associations from a namecache structure.
812 * If this is the last child of the parent the cache_drop(par) will
813 * attempt to recursively zap the parent.
815 * ncp must be locked. This routine will acquire a temporary lock on
 816 * the parent as well as the appropriate hash chain.
819 _cache_unlink_parent(struct namecache *ncp)
821 struct namecache *par;
822 struct vnode *dropvp;
824 if ((par = ncp->nc_parent) != NULL) {
825 KKASSERT(ncp->nc_parent == par);
828 spin_lock(&ncp->nc_head->spin);
829 LIST_REMOVE(ncp, nc_hash);
830 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
832 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
834 spin_unlock(&ncp->nc_head->spin);
835 ncp->nc_parent = NULL;
841 * We can only safely vdrop with no spinlocks held.
849 * Allocate a new namecache structure. Most of the code does not require
850 * zero-termination of the string but it makes vop_compat_ncreate() easier.
852 static struct namecache *
853 cache_alloc(int nlen)
855 struct namecache *ncp;
857 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
859 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
861 ncp->nc_flag = NCF_UNRESOLVED;
862 ncp->nc_error = ENOTCONN; /* needs to be resolved */
865 TAILQ_INIT(&ncp->nc_list);
871 * Can only be called for the case where the ncp has never been
872 * associated with anything (so no spinlocks are needed).
875 _cache_free(struct namecache *ncp)
877 KKASSERT(ncp->nc_refs == 1 && ncp->nc_lockstatus == 1);
879 kfree(ncp->nc_name, M_VFSCACHE);
880 kfree(ncp, M_VFSCACHE);
884 * [re]initialize a nchandle.
887 cache_zero(struct nchandle *nch)
894 * Ref and deref a namecache structure.
896 * The caller must specify a stable ncp pointer, typically meaning the
897 * ncp is already referenced but this can also occur indirectly through
898 * e.g. holding a lock on a direct child.
900 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
901 * use read spinlocks here.
906 cache_hold(struct nchandle *nch)
908 _cache_hold(nch->ncp);
909 atomic_add_int(&nch->mount->mnt_refs, 1);
914 * Create a copy of a namecache handle for an already-referenced
920 cache_copy(struct nchandle *nch, struct nchandle *target)
924 _cache_hold(target->ncp);
925 atomic_add_int(&nch->mount->mnt_refs, 1);
932 cache_changemount(struct nchandle *nch, struct mount *mp)
934 atomic_add_int(&nch->mount->mnt_refs, -1);
936 atomic_add_int(&nch->mount->mnt_refs, 1);
940 cache_drop(struct nchandle *nch)
942 atomic_add_int(&nch->mount->mnt_refs, -1);
943 _cache_drop(nch->ncp);
949 cache_lockstatus(struct nchandle *nch)
951 return(_cache_lockstatus(nch->ncp));
955 cache_lock(struct nchandle *nch)
957 _cache_lock(nch->ncp);
961 cache_lock_maybe_shared(struct nchandle *nch, int excl)
963 struct namecache *ncp = nch->ncp;
965 if (ncp_shared_lock_disable || excl ||
966 (ncp->nc_flag & NCF_UNRESOLVED)) {
969 _cache_lock_shared(ncp);
970 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
971 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
983 * Relock nch1 given an unlocked nch1 and a locked nch2. The caller
984 * is responsible for checking both for validity on return as they
985 * may have become invalid.
987 * We have to deal with potential deadlocks here, just ping pong
988 * the lock until we get it (we will always block somewhere when
989 * looping so this is not cpu-intensive).
991 * which = 0 nch1 not locked, nch2 is locked
992 * which = 1 nch1 is locked, nch2 is not locked
995 cache_relock(struct nchandle *nch1, struct ucred *cred1,
996 struct nchandle *nch2, struct ucred *cred2)
1004 if (cache_lock_nonblock(nch1) == 0) {
1005 cache_resolve(nch1, cred1);
1010 cache_resolve(nch1, cred1);
1013 if (cache_lock_nonblock(nch2) == 0) {
1014 cache_resolve(nch2, cred2);
1019 cache_resolve(nch2, cred2);
1026 cache_lock_nonblock(struct nchandle *nch)
1028 return(_cache_lock_nonblock(nch->ncp));
1032 cache_unlock(struct nchandle *nch)
1034 _cache_unlock(nch->ncp);
1038 * ref-and-lock, unlock-and-deref functions.
1040 * This function is primarily used by nlookup. Even though cache_lock
1041 * holds the vnode, it is possible that the vnode may have already
1042 * initiated a recyclement.
1044 * We want cache_get() to return a definitively usable vnode or a
1045 * definitively unresolved ncp.
1049 _cache_get(struct namecache *ncp)
1053 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1054 _cache_setunresolved(ncp);
1059 * Attempt to obtain a shared lock on the ncp. A shared lock will only
1060 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is
1061 * valid. Otherwise an exclusive lock will be acquired instead.
1065 _cache_get_maybe_shared(struct namecache *ncp, int excl)
1067 if (ncp_shared_lock_disable || excl ||
1068 (ncp->nc_flag & NCF_UNRESOLVED)) {
1069 return(_cache_get(ncp));
1072 _cache_lock_shared(ncp);
1073 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1074 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
1076 ncp = _cache_get(ncp);
1081 ncp = _cache_get(ncp);
1088 * This is a special form of _cache_lock() which only succeeds if
1089 * it can get a pristine, non-recursive lock. The caller must have
1090 * already ref'd the ncp.
1092 * On success the ncp will be locked, on failure it will not. The
1093 * ref count does not change either way.
1095 * We want _cache_lock_special() (on success) to return a definitively
1096 * usable vnode or a definitively unresolved ncp.
1099 _cache_lock_special(struct namecache *ncp)
1101 if (_cache_lock_nonblock(ncp) == 0) {
1102 if ((ncp->nc_lockstatus &
1103 ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1) {
1104 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1105 _cache_setunresolved(ncp);
1110 return(EWOULDBLOCK);
1114 * This function tries to get a shared lock but will back-off to an exclusive
1117 * (1) Some other thread is trying to obtain an exclusive lock
1118 * (to prevent the exclusive requester from getting livelocked out
1119 * by many shared locks).
1121 * (2) The current thread already owns an exclusive lock (to avoid
1124 * WARNING! On machines with lots of cores we really want to try hard to
1125 * get a shared lock or concurrent path lookups can chain-react
1126 * into a very high-latency exclusive lock.
1129 _cache_lock_shared_special(struct namecache *ncp)
1132 * Only honor a successful shared lock (returning 0) if there is
1133 * no exclusive request pending and the vnode, if present, is not
1134 * in a reclaimed state.
1136 if (_cache_lock_shared_nonblock(ncp) == 0) {
1137 if ((ncp->nc_lockstatus & NC_EXLOCK_REQ) == 0) {
1138 if (ncp->nc_vp == NULL ||
1139 (ncp->nc_vp->v_flag & VRECLAIMED) == 0) {
1144 return(EWOULDBLOCK);
1148 * Non-blocking shared lock failed. If we already own the exclusive
1149 * lock just acquire another exclusive lock (instead of deadlocking).
1150 * Otherwise acquire a shared lock.
1152 if (ncp->nc_locktd == curthread) {
1156 _cache_lock_shared(ncp);
1162 * NOTE: The same nchandle can be passed for both arguments.
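/*
 * For example, a caller that already has a referenced nchandle can lock it
 * in place (illustrative):
 *
 *	cache_get(&nch, &nch);
 *	...
 *	cache_put(&nch);
 */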
1165 cache_get(struct nchandle *nch, struct nchandle *target)
1167 KKASSERT(nch->ncp->nc_refs > 0);
1168 target->mount = nch->mount;
1169 target->ncp = _cache_get(nch->ncp);
1170 atomic_add_int(&target->mount->mnt_refs, 1);
1174 cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl)
1176 KKASSERT(nch->ncp->nc_refs > 0);
1177 target->mount = nch->mount;
1178 target->ncp = _cache_get_maybe_shared(nch->ncp, excl);
1179 atomic_add_int(&target->mount->mnt_refs, 1);
1187 _cache_put(struct namecache *ncp)
1197 cache_put(struct nchandle *nch)
1199 atomic_add_int(&nch->mount->mnt_refs, -1);
1200 _cache_put(nch->ncp);
1206 * Resolve an unresolved ncp by associating a vnode with it. If the
1207 * vnode is NULL, a negative cache entry is created.
1209 * The ncp should be locked on entry and will remain locked on return.
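/*
 * For example, a filesystem's name resolution code would typically finish
 * with something like the following (illustrative sketch; "vp" is assumed
 * to be the vnode found by the lookup, or NULL if the name does not exist):
 *
 *	if (vp != NULL)
 *		cache_setvp(nch, vp);		(resolve to a vnode)
 *	else
 *		cache_setvp(nch, NULL);		(create a negative entry)
 */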
1213 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
1215 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
1216 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
1220 * Any vp associated with an ncp which has children must
1221 * be held. Any vp associated with a locked ncp must be held.
1223 if (!TAILQ_EMPTY(&ncp->nc_list))
1225 spin_lock(&vp->v_spin);
1227 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
1228 spin_unlock(&vp->v_spin);
1229 if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
1233 * Set auxiliary flags
1235 switch(vp->v_type) {
1237 ncp->nc_flag |= NCF_ISDIR;
1240 ncp->nc_flag |= NCF_ISSYMLINK;
1241 /* XXX cache the contents of the symlink */
1246 atomic_add_int(&numcache, 1);
1248 /* XXX: this is a hack to work-around the lack of a real pfs vfs
1251 if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
1255 * When creating a negative cache hit we set the
1256 * namecache_gen. A later resolve will clean out the
1257 * negative cache hit if the mount point's namecache_gen
1258 * has changed. Used by devfs, could also be used by
1263 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
1265 spin_unlock(&ncspin);
1266 ncp->nc_error = ENOENT;
1268 VFS_NCPGEN_SET(mp, ncp);
1270 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
1277 cache_setvp(struct nchandle *nch, struct vnode *vp)
1279 _cache_setvp(nch->mount, nch->ncp, vp);
1286 cache_settimeout(struct nchandle *nch, int nticks)
1288 struct namecache *ncp = nch->ncp;
1290 if ((ncp->nc_timeout = ticks + nticks) == 0)
1291 ncp->nc_timeout = 1;
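/*
 * For example, a filesystem that wants a resolved entry revalidated after
 * roughly one second would call (illustrative; hz is ticks per second):
 *
 *	cache_settimeout(nch, hz);
 */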
1295 * Disassociate the vnode or negative-cache association and mark a
1296 * namecache entry as unresolved again. Note that the ncp is still
1297 * left in the hash table and still linked to its parent.
1299 * The ncp should be locked and refd on entry and will remain locked and refd
1302 * This routine is normally never called on a directory containing children.
1303 * However, NFS often does just that in its rename() code as a cop-out to
1304 * avoid complex namespace operations. This disconnects a directory vnode
1305 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
1311 _cache_setunresolved(struct namecache *ncp)
1315 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1316 ncp->nc_flag |= NCF_UNRESOLVED;
1317 ncp->nc_timeout = 0;
1318 ncp->nc_error = ENOTCONN;
1319 if ((vp = ncp->nc_vp) != NULL) {
1320 atomic_add_int(&numcache, -1);
1321 spin_lock(&vp->v_spin);
1323 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
1324 spin_unlock(&vp->v_spin);
1327 * Any vp associated with an ncp with children is
1328 * held by that ncp. Any vp associated with a locked
1329 * ncp is held by that ncp. These conditions must be
1330 * undone when the vp is cleared out from the ncp.
1332 if (!TAILQ_EMPTY(&ncp->nc_list))
1334 if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
1338 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
1340 spin_unlock(&ncspin);
1342 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
1347 * The cache_nresolve() code calls this function to automatically
1348 * set a resolved cache element to unresolved if it has timed out
1349 * or if it is a negative cache hit and the mount point namecache_gen
1353 _cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp)
1356 * Try to zap entries that have timed out. We have
1357 * to be careful here because locked leafs may depend
1358 * on the vnode remaining intact in a parent, so only
1359 * do this under very specific conditions.
1361 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
1362 TAILQ_EMPTY(&ncp->nc_list)) {
1367 * If a resolved negative cache hit is invalid due to
1368 * the mount's namecache generation being bumped, zap it.
1370 if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
1375 * Otherwise we are good
1380 static __inline void
1381 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
1384 * Already in an unresolved state, nothing to do.
1386 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1387 if (_cache_auto_unresolve_test(mp, ncp))
1388 _cache_setunresolved(ncp);
1396 cache_setunresolved(struct nchandle *nch)
1398 _cache_setunresolved(nch->ncp);
1402 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
1403 * looking for matches. This flag tells the lookup code when it must
1404 * check for a mount linkage and also prevents the directories in question
1405 * from being deleted or renamed.
1409 cache_clrmountpt_callback(struct mount *mp, void *data)
1411 struct nchandle *nch = data;
1413 if (mp->mnt_ncmounton.ncp == nch->ncp)
1415 if (mp->mnt_ncmountpt.ncp == nch->ncp)
1424 cache_clrmountpt(struct nchandle *nch)
1428 count = mountlist_scan(cache_clrmountpt_callback, nch,
1429 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1431 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1435 * Invalidate portions of the namecache topology given a starting entry.
1436 * The passed ncp is set to an unresolved state and:
 1438 * The passed ncp must be referenced and locked. The routine may unlock
1439 * and relock ncp several times, and will recheck the children and loop
1440 * to catch races. When done the passed ncp will be returned with the
1441 * reference and lock intact.
1443 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
1444 * that the physical underlying nodes have been
1445 * destroyed... as in deleted. For example, when
1446 * a directory is removed. This will cause record
1447 * lookups on the name to no longer be able to find
1448 * the record and tells the resolver to return failure
 1449 *			  rather than trying to resolve through the parent.
1451 * The topology itself, including ncp->nc_name,
1454 * This only applies to the passed ncp, if CINV_CHILDREN
1455 * is specified the children are not flagged.
1457 * CINV_CHILDREN - Set all children (recursively) to an unresolved
1460 * Note that this will also have the side effect of
1461 * cleaning out any unreferenced nodes in the topology
1462 * from the leaves up as the recursion backs out.
1464 * Note that the topology for any referenced nodes remains intact, but
1465 * the nodes will be marked as having been destroyed and will be set
1466 * to an unresolved state.
1468 * It is possible for cache_inval() to race a cache_resolve(), meaning that
1469 * the namecache entry may not actually be invalidated on return if it was
 1470 * revalidated while recursing down into its children. This code guarantees
1471 * that the node(s) will go through an invalidation cycle, but does not
 1472 * guarantee that they will remain in an invalidated state.
1474 * Returns non-zero if a revalidation was detected during the invalidation
1475 * recursion, zero otherwise. Note that since only the original ncp is
1476 * locked the revalidation ultimately can only indicate that the original ncp
 1477 * *MIGHT* not have been re-resolved.
1479 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1480 * have to avoid blowing out the kernel stack. We do this by saving the
1481 * deep namecache node and aborting the recursion, then re-recursing at that
1482 * node using a depth-first algorithm in order to allow multiple deep
1483 * recursions to chain through each other, then we restart the invalidation
1488 struct namecache *resume_ncp;
1492 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
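/*
 * For example, after a directory has been removed a caller would typically
 * invalidate the corresponding subtree with (illustrative):
 *
 *	cache_inval(nch, CINV_DESTROY | CINV_CHILDREN);
 *
 * which marks the entry destroyed and recursively unresolves any cached
 * children, per the description above.
 */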
1496 _cache_inval(struct namecache *ncp, int flags)
1498 struct cinvtrack track;
1499 struct namecache *ncp2;
1503 track.resume_ncp = NULL;
1506 r = _cache_inval_internal(ncp, flags, &track);
1507 if (track.resume_ncp == NULL)
1509 kprintf("Warning: deep namecache recursion at %s\n",
1512 while ((ncp2 = track.resume_ncp) != NULL) {
1513 track.resume_ncp = NULL;
1515 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
1525 cache_inval(struct nchandle *nch, int flags)
1527 return(_cache_inval(nch->ncp, flags));
1531 * Helper for _cache_inval(). The passed ncp is refd and locked and
1532 * remains that way on return, but may be unlocked/relocked multiple
1533 * times by the routine.
1536 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
1538 struct namecache *kid;
1539 struct namecache *nextkid;
1542 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
1544 _cache_setunresolved(ncp);
1545 if (flags & CINV_DESTROY)
1546 ncp->nc_flag |= NCF_DESTROYED;
1547 if ((flags & CINV_CHILDREN) &&
1548 (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
1551 if (++track->depth > MAX_RECURSION_DEPTH) {
1552 track->resume_ncp = ncp;
1558 if (track->resume_ncp) {
1562 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
1563 _cache_hold(nextkid);
1564 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
1565 TAILQ_FIRST(&kid->nc_list)
1568 rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
1579 * Someone could have gotten in there while ncp was unlocked,
1582 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1588 * Invalidate a vnode's namecache associations. To avoid races against
1589 * the resolver we do not invalidate a node which we previously invalidated
1590 * but which was then re-resolved while we were in the invalidation loop.
1592 * Returns non-zero if any namecache entries remain after the invalidation
 1595 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1596 * be ripped out of the topology while held, the vnode's v_namecache
1597 * list has no such restriction. NCP's can be ripped out of the list
1598 * at virtually any time if not locked, even if held.
1600 * In addition, the v_namecache list itself must be locked via
1601 * the vnode's spinlock.
1604 cache_inval_vp(struct vnode *vp, int flags)
1606 struct namecache *ncp;
1607 struct namecache *next;
1610 spin_lock(&vp->v_spin);
1611 ncp = TAILQ_FIRST(&vp->v_namecache);
1615 /* loop entered with ncp held and vp spin-locked */
1616 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1618 spin_unlock(&vp->v_spin);
1620 if (ncp->nc_vp != vp) {
1621 kprintf("Warning: cache_inval_vp: race-A detected on "
1622 "%s\n", ncp->nc_name);
1628 _cache_inval(ncp, flags);
1629 _cache_put(ncp); /* also releases reference */
1631 spin_lock(&vp->v_spin);
1632 if (ncp && ncp->nc_vp != vp) {
1633 spin_unlock(&vp->v_spin);
1634 kprintf("Warning: cache_inval_vp: race-B detected on "
1635 "%s\n", ncp->nc_name);
1640 spin_unlock(&vp->v_spin);
1641 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1645 * This routine is used instead of the normal cache_inval_vp() when we
1646 * are trying to recycle otherwise good vnodes.
1648 * Return 0 on success, non-zero if not all namecache records could be
1649 * disassociated from the vnode (for various reasons).
1652 cache_inval_vp_nonblock(struct vnode *vp)
1654 struct namecache *ncp;
1655 struct namecache *next;
1657 spin_lock(&vp->v_spin);
1658 ncp = TAILQ_FIRST(&vp->v_namecache);
1662 /* loop entered with ncp held */
1663 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1665 spin_unlock(&vp->v_spin);
1666 if (_cache_lock_nonblock(ncp)) {
1672 if (ncp->nc_vp != vp) {
1673 kprintf("Warning: cache_inval_vp: race-A detected on "
1674 "%s\n", ncp->nc_name);
1680 _cache_inval(ncp, 0);
1681 _cache_put(ncp); /* also releases reference */
1683 spin_lock(&vp->v_spin);
1684 if (ncp && ncp->nc_vp != vp) {
1685 spin_unlock(&vp->v_spin);
1686 kprintf("Warning: cache_inval_vp: race-B detected on "
1687 "%s\n", ncp->nc_name);
1692 spin_unlock(&vp->v_spin);
1694 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1698 * The source ncp has been renamed to the target ncp. Both fncp and tncp
1699 * must be locked. The target ncp is destroyed (as a normal rename-over
1700 * would destroy the target file or directory).
1702 * Because there may be references to the source ncp we cannot copy its
1703 * contents to the target. Instead the source ncp is relinked as the target
1704 * and the target ncp is removed from the namecache topology.
1707 cache_rename(struct nchandle *fnch, struct nchandle *tnch)
1709 struct namecache *fncp = fnch->ncp;
1710 struct namecache *tncp = tnch->ncp;
1711 struct namecache *tncp_par;
1712 struct nchash_head *nchpp;
1717 if (tncp->nc_nlen) {
1718 nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK);
1719 bcopy(tncp->nc_name, nname, tncp->nc_nlen);
1720 nname[tncp->nc_nlen] = 0;
1726 * Rename fncp (unlink)
1728 _cache_unlink_parent(fncp);
1729 oname = fncp->nc_name;
1730 fncp->nc_name = nname;
1731 fncp->nc_nlen = tncp->nc_nlen;
1733 kfree(oname, M_VFSCACHE);
1735 tncp_par = tncp->nc_parent;
1736 _cache_hold(tncp_par);
1737 _cache_lock(tncp_par);
1740 * Rename fncp (relink)
1742 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
1743 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
1744 nchpp = NCHHASH(hash);
1746 spin_lock(&nchpp->spin);
1747 _cache_link_parent(fncp, tncp_par, nchpp);
1748 spin_unlock(&nchpp->spin);
1750 _cache_put(tncp_par);
1753 * Get rid of the overwritten tncp (unlink)
1755 _cache_unlink(tncp);
1759 * Perform actions consistent with unlinking a file. The passed-in ncp
1762 * The ncp is marked DESTROYED so it no longer shows up in searches,
1763 * and will be physically deleted when the vnode goes away.
1765 * If the related vnode has no refs then we cycle it through vget()/vput()
1766 * to (possibly if we don't have a ref race) trigger a deactivation,
1767 * allowing the VFS to trivially detect and recycle the deleted vnode
1768 * via VOP_INACTIVE().
1770 * NOTE: _cache_rename() will automatically call _cache_unlink() on the
1774 cache_unlink(struct nchandle *nch)
1776 _cache_unlink(nch->ncp);
1780 _cache_unlink(struct namecache *ncp)
1785 * Causes lookups to fail and allows another ncp with the same
1786 * name to be created under ncp->nc_parent.
1788 ncp->nc_flag |= NCF_DESTROYED;
1791 * Attempt to trigger a deactivation.
1793 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
1794 (vp = ncp->nc_vp) != NULL &&
1795 !sysref_isactive(&vp->v_sysref)) {
1796 if (vget(vp, LK_SHARED) == 0)
1802 * vget the vnode associated with the namecache entry. Resolve the namecache
1803 * entry if necessary. The passed ncp must be referenced and locked.
1805 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
1806 * (depending on the passed lk_type) will be returned in *vpp with an error
1807 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
1808 * most typical error is ENOENT, meaning that the ncp represents a negative
1809 * cache hit and there is no vnode to retrieve, but other errors can occur
1812 * The vget() can race a reclaim. If this occurs we re-resolve the
1815 * There are numerous places in the kernel where vget() is called on a
1816 * vnode while one or more of its namecache entries is locked. Releasing
1817 * a vnode never deadlocks against locked namecache entries (the vnode
1818 * will not get recycled while referenced ncp's exist). This means we
1819 * can safely acquire the vnode. In fact, we MUST NOT release the ncp
1820 * lock when acquiring the vp lock or we might cause a deadlock.
1822 * NOTE: The passed-in ncp must be locked exclusively if it is initially
1823 * unresolved. If a reclaim race occurs the passed-in ncp will be
1824 * relocked exclusively before being re-resolved.
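/*
 * For example (illustrative sketch; "nch" and "cred" are assumed to be
 * supplied by the caller, error handling abbreviated):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		... use the locked, referenced vnode ...
 *		vput(vp);
 *	}
 *
 * An ENOENT error typically just means the entry resolved to a negative
 * cache hit.
 */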
1827 cache_vget(struct nchandle *nch, struct ucred *cred,
1828 int lk_type, struct vnode **vpp)
1830 struct namecache *ncp;
1837 if (ncp->nc_flag & NCF_UNRESOLVED)
1838 error = cache_resolve(nch, cred);
1842 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1843 error = vget(vp, lk_type);
1848 if (error == ENOENT) {
1849 kprintf("Warning: vnode reclaim race detected "
1850 "in cache_vget on %p (%s)\n",
1854 _cache_setunresolved(ncp);
1859 * Not a reclaim race, some other error.
1861 KKASSERT(ncp->nc_vp == vp);
1864 KKASSERT(ncp->nc_vp == vp);
1865 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
1868 if (error == 0 && vp == NULL)
1875 * Similar to cache_vget() but only acquires a ref on the vnode.
1877 * NOTE: The passed-in ncp must be locked exclusively if it is initially
1878 * unresolved. If a reclaim race occurs the passed-in ncp will be
1879 * relocked exclusively before being re-resolved.
1882 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
1884 struct namecache *ncp;
1891 if (ncp->nc_flag & NCF_UNRESOLVED)
1892 error = cache_resolve(nch, cred);
1896 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1897 error = vget(vp, LK_SHARED);
1902 if (error == ENOENT) {
1903 kprintf("Warning: vnode reclaim race detected "
 1904 				"in cache_vref on %p (%s)\n",
1908 _cache_setunresolved(ncp);
1913 * Not a reclaim race, some other error.
1915 KKASSERT(ncp->nc_vp == vp);
1918 KKASSERT(ncp->nc_vp == vp);
1919 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
1920 /* caller does not want a lock */
1924 if (error == 0 && vp == NULL)
1931 * Return a referenced vnode representing the parent directory of
1934 * Because the caller has locked the ncp it should not be possible for
1935 * the parent ncp to go away. However, the parent can unresolve its
1936 * dvp at any time so we must be able to acquire a lock on the parent
1937 * to safely access nc_vp.
1939 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
1940 * so use vhold()/vdrop() while holding the lock to prevent dvp from
1941 * getting destroyed.
1943 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a
1944 * lock on the ncp in question..
1946 static struct vnode *
1947 cache_dvpref(struct namecache *ncp)
1949 struct namecache *par;
1953 if ((par = ncp->nc_parent) != NULL) {
1956 if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
1957 if ((dvp = par->nc_vp) != NULL)
1962 if (vget(dvp, LK_SHARED) == 0) {
1965 /* return refd, unlocked dvp */
1977 * Convert a directory vnode to a namecache record without any other
1978 * knowledge of the topology. This ONLY works with directory vnodes and
1979 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
1980 * returned ncp (if not NULL) will be held and unlocked.
1982 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
1983 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
1984 * for dvp. This will fail only if the directory has been deleted out from
1987 * Callers must always check for a NULL return no matter the value of 'makeit'.
1989 * To avoid underflowing the kernel stack each recursive call increments
1990 * the makeit variable.
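/*
 * For example, the NFS server can recover an nchandle for a directory
 * vnode obtained directly from a file handle (illustrative sketch; "dvp"
 * and "cred" are assumed to be supplied by the caller):
 *
 *	struct nchandle nch;
 *
 *	cache_fromdvp(dvp, cred, 1, &nch);
 *	if (nch.ncp == NULL)
 *		... directory was deleted out from under the caller ...
 *	else
 *		... use nch, then release it with cache_drop(&nch) ...
 */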
1993 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1994 struct vnode *dvp, char *fakename);
1995 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1996 struct vnode **saved_dvp);
1999 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
2000 struct nchandle *nch)
2002 struct vnode *saved_dvp;
2008 nch->mount = dvp->v_mount;
2013 * Handle the makeit == 0 degenerate case
2016 spin_lock_shared(&dvp->v_spin);
2017 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2020 spin_unlock_shared(&dvp->v_spin);
2024 * Loop until resolution, inside code will break out on error.
2028 * Break out if we successfully acquire a working ncp.
2030 spin_lock_shared(&dvp->v_spin);
2031 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2034 spin_unlock_shared(&dvp->v_spin);
2037 spin_unlock_shared(&dvp->v_spin);
2040 * If dvp is the root of its filesystem it should already
2041 * have a namecache pointer associated with it as a side
2042 * effect of the mount, but it may have been disassociated.
2044 if (dvp->v_flag & VROOT) {
2045 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
2046 error = cache_resolve_mp(nch->mount);
2047 _cache_put(nch->ncp);
2049 kprintf("cache_fromdvp: resolve root of mount %p error %d",
2050 dvp->v_mount, error);
2054 kprintf(" failed\n");
2059 kprintf(" succeeded\n");
2064 * If we are recursed too deeply resort to an O(n^2)
2065 * algorithm to resolve the namecache topology. The
2066 * resolved pvp is left referenced in saved_dvp to
2067 * prevent the tree from being destroyed while we loop.
2070 error = cache_fromdvp_try(dvp, cred, &saved_dvp);
2072 kprintf("lookupdotdot(longpath) failed %d "
2073 "dvp %p\n", error, dvp);
2081 * Get the parent directory and resolve its ncp.
2084 kfree(fakename, M_TEMP);
2087 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2090 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
2096 * Reuse makeit as a recursion depth counter. On success
2097 * nch will be fully referenced.
2099 cache_fromdvp(pvp, cred, makeit + 1, nch);
2101 if (nch->ncp == NULL)
2105 * Do an inefficient scan of pvp (embodied by ncp) to look
2106 * for dvp. This will create a namecache record for dvp on
2107 * success. We loop up to recheck on success.
2109 * ncp and dvp are both held but not locked.
2111 error = cache_inefficient_scan(nch, cred, dvp, fakename);
2113 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
2114 pvp, nch->ncp->nc_name, dvp);
2116 /* nch was NULLed out, reload mount */
2117 nch->mount = dvp->v_mount;
2121 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
2122 pvp, nch->ncp->nc_name);
2125 /* nch was NULLed out, reload mount */
2126 nch->mount = dvp->v_mount;
2130 * If nch->ncp is non-NULL it will have been held already.
2133 kfree(fakename, M_TEMP);
2142 * Go up the chain of parent directories until we find something
2143 * we can resolve into the namecache. This is very inefficient.
2147 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
2148 struct vnode **saved_dvp)
2150 struct nchandle nch;
2153 static time_t last_fromdvp_report;
2157 * Loop getting the parent directory vnode until we get something we
2158 * can resolve in the namecache.
2161 nch.mount = dvp->v_mount;
2167 kfree(fakename, M_TEMP);
2170 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2177 spin_lock_shared(&pvp->v_spin);
2178 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
2179 _cache_hold(nch.ncp);
2180 spin_unlock_shared(&pvp->v_spin);
2184 spin_unlock_shared(&pvp->v_spin);
2185 if (pvp->v_flag & VROOT) {
2186 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
2187 error = cache_resolve_mp(nch.mount);
2188 _cache_unlock(nch.ncp);
2191 _cache_drop(nch.ncp);
2201 if (last_fromdvp_report != time_uptime) {
2202 last_fromdvp_report = time_uptime;
2203 kprintf("Warning: extremely inefficient path "
2204 "resolution on %s\n",
2207 error = cache_inefficient_scan(&nch, cred, dvp, fakename);
2210 * Hopefully dvp now has a namecache record associated with
2211 * it. Leave it referenced to prevent the kernel from
2212 * recycling the vnode. Otherwise extremely long directory
2213 * paths could result in endless recycling.
2218 _cache_drop(nch.ncp);
2221 kfree(fakename, M_TEMP);
2226 * Do an inefficient scan of the directory represented by ncp looking for
2227 * the directory vnode dvp. ncp must be held but not locked on entry and
2228 * will be held on return. dvp must be refd but not locked on entry and
2229 * will remain refd on return.
2231 * Why do this at all? Well, due to its stateless nature the NFS server
2232 * converts file handles directly to vnodes without necessarily going through
2233 * the namecache ops that would otherwise create the namecache topology
2234 * leading to the vnode. We could either (1) Change the namecache algorithms
 2235 * to allow disconnected namecache records that are re-merged opportunistically,
2236 * or (2) Make the NFS server backtrack and scan to recover a connected
2237 * namecache topology in order to then be able to issue new API lookups.
2239 * It turns out that (1) is a huge mess. It takes a nice clean set of
2240 * namecache algorithms and introduces a lot of complication in every subsystem
2241 * that calls into the namecache to deal with the re-merge case, especially
2242 * since we are using the namecache to placehold negative lookups and the
2243 * vnode might not be immediately assigned. (2) is certainly far less
 2244 * efficient than (1), but since we are only talking about directories here
2245 * (which are likely to remain cached), the case does not actually run all
2246 * that often and has the supreme advantage of not polluting the namecache
2249 * If a fakename is supplied just construct a namecache entry using the
2253 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2254 struct vnode *dvp, char *fakename)
2256 struct nlcomponent nlc;
2257 struct nchandle rncp;
2269 vat.va_blocksize = 0;
2270 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
2273 error = cache_vref(nch, cred, &pvp);
2278 kprintf("inefficient_scan: directory iosize %ld "
2279 "vattr fileid = %lld\n",
2281 (long long)vat.va_fileid);
2285 * Use the supplied fakename if not NULL. Fake names are typically
2286 * not in the actual filesystem hierarchy. This is used by HAMMER
2287 * to glue @@timestamp recursions together.
2290 nlc.nlc_nameptr = fakename;
2291 nlc.nlc_namelen = strlen(fakename);
2292 rncp = cache_nlookup(nch, &nlc);
2296 if ((blksize = vat.va_blocksize) == 0)
2297 blksize = DEV_BSIZE;
2298 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
2304 iov.iov_base = rbuf;
2305 iov.iov_len = blksize;
2308 uio.uio_resid = blksize;
2309 uio.uio_segflg = UIO_SYSSPACE;
2310 uio.uio_rw = UIO_READ;
2311 uio.uio_td = curthread;
2313 if (ncvp_debug >= 2)
2314 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
2315 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
2317 den = (struct dirent *)rbuf;
2318 bytes = blksize - uio.uio_resid;
2321 if (ncvp_debug >= 2) {
2322 kprintf("cache_inefficient_scan: %*.*s\n",
2323 den->d_namlen, den->d_namlen,
2326 if (den->d_type != DT_WHT &&
2327 den->d_ino == vat.va_fileid) {
2329 kprintf("cache_inefficient_scan: "
2330 "MATCHED inode %lld path %s/%*.*s\n",
2331 (long long)vat.va_fileid,
2333 den->d_namlen, den->d_namlen,
2336 nlc.nlc_nameptr = den->d_name;
2337 nlc.nlc_namelen = den->d_namlen;
2338 rncp = cache_nlookup(nch, &nlc);
2339 KKASSERT(rncp.ncp != NULL);
2342 bytes -= _DIRENT_DIRSIZ(den);
2343 den = _DIRENT_NEXT(den);
2345 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
2348 kfree(rbuf, M_TEMP);
2352 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
2353 _cache_setvp(rncp.mount, rncp.ncp, dvp);
2354 if (ncvp_debug >= 2) {
2355 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
2356 nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
2359 if (ncvp_debug >= 2) {
2360 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
2361 nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
2365 if (rncp.ncp->nc_vp == NULL)
2366 error = rncp.ncp->nc_error;
2368 * Release rncp after a successful nlookup. rncp was fully
2373 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
2374 dvp, nch->ncp->nc_name);
2381 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
2382 * state, which disassociates it from its vnode or ncneglist.
2384 * Then, if there are no additional references to the ncp and no children,
2385 * the ncp is removed from the topology and destroyed.
2387 * References and/or children may exist if the ncp is in the middle of the
2388 * topology, preventing the ncp from being destroyed.
2390 * This function must be called with the ncp held and locked and will unlock
2391 * and drop it during zapping.
2393 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
2394 * This case can occur in the cache_drop() path.
 2396 * This function may return a held (but NOT locked) parent node which the
2397 * caller must drop. We do this so _cache_drop() can loop, to avoid
2398 * blowing out the kernel stack.
2400 * WARNING! For MPSAFE operation this routine must acquire up to three
2401 * spin locks to be able to safely test nc_refs. Lock order is
2404 * hash spinlock if on hash list
2405 * parent spinlock if child of parent
2406 * (the ncp is unresolved so there is no vnode association)
2408 static struct namecache *
2409 cache_zap(struct namecache *ncp, int nonblock)
2411 struct namecache *par;
2412 struct vnode *dropvp;
2416 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2418 _cache_setunresolved(ncp);
2421 * Try to scrap the entry and possibly tail-recurse on its parent.
2422 * We only scrap unref'd (other than our ref) unresolved entries,
2423 * we do not scrap 'live' entries.
2425 * Note that once the spinlocks are acquired if nc_refs == 1 no
2426 * other references are possible. If it isn't, however, we have
2427 * to decrement but also be sure to avoid a 1->0 transition.
2429 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2430 KKASSERT(ncp->nc_refs > 0);
2433 * Acquire locks. Note that the parent can't go away while we hold
2436 if ((par = ncp->nc_parent) != NULL) {
2439 if (_cache_lock_nonblock(par) == 0)
2441 refs = ncp->nc_refs;
2442 ncp->nc_flag |= NCF_DEFEREDZAP;
2443 ++numdefered; /* MP race ok */
2444 if (atomic_cmpset_int(&ncp->nc_refs,
2456 spin_lock(&ncp->nc_head->spin);
2460 * If someone other than us has a ref or we have children
2461 * we cannot zap the entry. The 1->0 transition and any
2462 * further list operation is protected by the spinlocks
2463 * we have acquired but other transitions are not.
2466 refs = ncp->nc_refs;
2467 if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
2469 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
2471 spin_unlock(&ncp->nc_head->spin);
2481 * We are the only ref and with the spinlocks held no further
2482 * refs can be acquired by others.
2484 * Remove us from the hash list and parent list. We have to
2485 * drop a ref on the parent's vp if the parent's list becomes
2490 struct nchash_head *nchpp = ncp->nc_head;
2492 KKASSERT(nchpp != NULL);
2493 LIST_REMOVE(ncp, nc_hash);
2494 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
2495 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
2496 dropvp = par->nc_vp;
2497 ncp->nc_head = NULL;
2498 ncp->nc_parent = NULL;
2499 spin_unlock(&nchpp->spin);
2502 KKASSERT(ncp->nc_head == NULL);
2506 * ncp should not have picked up any refs. Physically
2509 KKASSERT(ncp->nc_refs == 1);
2510 /* _cache_unlock(ncp) not required */
2511 ncp->nc_refs = -1; /* safety */
2513 kfree(ncp->nc_name, M_VFSCACHE);
2514 kfree(ncp, M_VFSCACHE);
2517 * Delayed drop (we had to release our spinlocks)
2519 * The refed parent (if not NULL) must be dropped. The
2520 * caller is responsible for looping.
2528 * Clean up dangling negative cache and deferred-drop entries in the
2531 * This routine is called in the critical path and also called from
2532 * vnlru(). When called from vnlru we use a lower limit to try to
2533 * deal with the negative cache before the critical path has to start
2536 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;
2538 static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
2539 static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
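/*
 * The [2] state arrays keep independent hysteresis state per caller
 * class, indexed by the critpath argument: 0 for the vnlru() path,
 * which uses the lower limits described above, and 1 for the critical
 * lookup path (see the cache_hysteresis(1) calls in cache_nlookup*()).
 */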
2542 cache_hysteresis(int critpath)
2545 int neglimit = desiredvnodes / ncnegfactor;
2546 int xnumcache = numcache;
2549 neglimit = neglimit * 8 / 10;
2552 * Don't cache too many negative hits. We use hysteresis to reduce
2553 * the impact on the critical path.
2555 switch(neg_cache_hysteresis_state[critpath]) {
2557 if (numneg > MINNEG && numneg > neglimit) {
2559 _cache_cleanneg(ncnegflush);
2561 _cache_cleanneg(ncnegflush +
2563 neg_cache_hysteresis_state[critpath] = CHI_HIGH;
2567 if (numneg > MINNEG * 9 / 10 &&
2568 numneg * 9 / 10 > neglimit
2571 _cache_cleanneg(ncnegflush);
2573 _cache_cleanneg(ncnegflush +
2574 numneg * 9 / 10 - neglimit);
2576 neg_cache_hysteresis_state[critpath] = CHI_LOW;
2582 * Don't cache too many positive hits. We use hysteresis to reduce
2583 * the impact on the critical path.
2585 * Excessive positive hits can accumulate due to large numbers of
2586 * hardlinks (the vnode cache will not prevent hl ncps from growing
2589 if ((poslimit = ncposlimit) == 0)
2590 poslimit = desiredvnodes * 2;
2592 poslimit = poslimit * 8 / 10;
2594 switch(pos_cache_hysteresis_state[critpath]) {
2596 if (xnumcache > poslimit && xnumcache > MINPOS) {
2598 _cache_cleanpos(ncposflush);
2600 _cache_cleanpos(ncposflush +
2601 xnumcache - poslimit);
2602 pos_cache_hysteresis_state[critpath] = CHI_HIGH;
2606 if (xnumcache > poslimit * 5 / 6 && xnumcache > MINPOS) {
2608 _cache_cleanpos(ncposflush);
2610 _cache_cleanpos(ncposflush +
2611 xnumcache - poslimit * 5 / 6);
2613 pos_cache_hysteresis_state[critpath] = CHI_LOW;
2619 * Clean out dangling deferred-zap ncps which could not
2620 * be cleanly dropped if too many build up. Note
2621 * that numdefered is not an exact count, as such ncps
2622 * can be reused and the counter is not handled in an
2623 * MP-safe manner by design.
2625 if (numdefered > neglimit) {
2626 _cache_cleandefered();
2631 * NEW NAMECACHE LOOKUP API
2633 * Lookup an entry in the namecache. The passed par_nch must be referenced
2634 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
2635 * is ALWAYS returned, even if the supplied component is illegal.
2637 * The resulting namecache entry should be returned to the system with
2638 * cache_put() or cache_unlock() + cache_drop().
2640 * namecache locks are recursive but care must be taken to avoid lock order
2641 * reversals (hence why the passed par_nch must be unlocked). Locks are
2642 * ordered for parent traversals, not for child traversals.
2644 * Nobody else will be able to manipulate the associated namespace (e.g.
2645 * create, delete, rename, rename-target) until the caller unlocks the
2648 * The returned entry will be in one of three states: positive hit (non-null
2649 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
2650 * Unresolved entries must be resolved through the filesystem to associate the
2651 * vnode and/or determine whether a positive or negative hit has occurred.
2653 * It is not necessary to lock a directory in order to lock namespace under
2654 * that directory. In fact, it is explicitly not allowed to do that. A
2655 * directory is typically only locked when being created, renamed, or
2658 * The directory (par) may be unresolved, in which case any returned child
2659 * will likely also be marked unresolved. Likely but not guaranteed. Since
2660 * the filesystem lookup requires a resolved directory vnode the caller is
2661 * responsible for resolving the namecache chain top-down. This API
2662 * specifically allows whole chains to be created in an unresolved state.
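 *
 * Typical usage (illustrative sketch; identifiers other than the
 * cache_* API are assumed):
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *
 *	nlc.nlc_nameptr = name;			(a single path component)
 *	nlc.nlc_namelen = strlen(name);
 *	nch = cache_nlookup(&par_nch, &nlc);	(returned locked + refd)
 *	...resolve and/or use nch.ncp...
 *	cache_put(&nch);			(unlock + drop)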
2665 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
2667 struct nchandle nch;
2668 struct namecache *ncp;
2669 struct namecache *new_ncp;
2670 struct nchash_head *nchpp;
2678 mp = par_nch->mount;
2682 * This is a good time to call it, no ncp's are locked by
2685 cache_hysteresis(1);
2688 * Try to locate an existing entry
2690 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2691 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2693 nchpp = NCHHASH(hash);
2696 spin_lock(&nchpp->spin);
2698 spin_lock_shared(&nchpp->spin);
2700 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2704 * Break out if we find a matching entry. Note that
2705 * UNRESOLVED entries may match, but DESTROYED entries
2708 if (ncp->nc_parent == par_nch->ncp &&
2709 ncp->nc_nlen == nlc->nlc_namelen &&
2710 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2711 (ncp->nc_flag & NCF_DESTROYED) == 0
2715 spin_unlock(&nchpp->spin);
2717 spin_unlock_shared(&nchpp->spin);
2719 _cache_unlock(par_nch->ncp);
2722 if (_cache_lock_special(ncp) == 0) {
2723 _cache_auto_unresolve(mp, ncp);
2725 _cache_free(new_ncp);
2736 * We failed to locate an entry, create a new entry and add it to
2737 * the cache. The parent ncp must also be locked so we
2740 * We have to relookup after possibly blocking in kmalloc or
2741 * when locking par_nch.
2743 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2744 * mount case, in which case nc_name will be NULL.
2746 if (new_ncp == NULL) {
2747 spin_unlock_shared(&nchpp->spin);
2748 new_ncp = cache_alloc(nlc->nlc_namelen);
2749 if (nlc->nlc_namelen) {
2750 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2752 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2758 * NOTE! The spinlock is held exclusively here because new_ncp
2761 if (par_locked == 0) {
2762 spin_unlock(&nchpp->spin);
2763 _cache_lock(par_nch->ncp);
2769 * WARNING! We still hold the spinlock. We have to set the hash
2770 * table entry atomically.
2773 _cache_link_parent(ncp, par_nch->ncp, nchpp);
2774 spin_unlock(&nchpp->spin);
2775 _cache_unlock(par_nch->ncp);
2776 /* par_locked = 0 - not used */
2779 * stats and namecache size management
2781 if (ncp->nc_flag & NCF_UNRESOLVED)
2782 ++gd->gd_nchstats->ncs_miss;
2783 else if (ncp->nc_vp)
2784 ++gd->gd_nchstats->ncs_goodhits;
2786 ++gd->gd_nchstats->ncs_neghits;
2789 atomic_add_int(&nch.mount->mnt_refs, 1);
2794 * Attempt to look up a namecache entry and return with a shared namecache
2798 cache_nlookup_maybe_shared(struct nchandle *par_nch, struct nlcomponent *nlc,
2799 int excl, struct nchandle *res_nch)
2801 struct namecache *ncp;
2802 struct nchash_head *nchpp;
2808 * If an exclusive lock is requested or shared namecache locks are disabled,
2811 if (ncp_shared_lock_disable || excl)
2812 return(EWOULDBLOCK);
2816 mp = par_nch->mount;
2819 * This is a good time to call it, no ncp's are locked by
2822 cache_hysteresis(1);
2825 * Try to locate an existing entry
2827 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2828 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2829 nchpp = NCHHASH(hash);
2831 spin_lock_shared(&nchpp->spin);
2833 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2837 * Break out if we find a matching entry. Note that
2838 * UNRESOLVED entries may match, but DESTROYED entries
2841 if (ncp->nc_parent == par_nch->ncp &&
2842 ncp->nc_nlen == nlc->nlc_namelen &&
2843 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2844 (ncp->nc_flag & NCF_DESTROYED) == 0
2847 spin_unlock_shared(&nchpp->spin);
2848 if (_cache_lock_shared_special(ncp) == 0) {
2849 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
2850 (ncp->nc_flag & NCF_DESTROYED) == 0 &&
2851 _cache_auto_unresolve_test(mp, ncp) == 0) {
2857 spin_lock_shared(&nchpp->spin);
2865 spin_unlock_shared(&nchpp->spin);
2866 return(EWOULDBLOCK);
2871 * Note that nc_error might be non-zero (e.g. ENOENT).
2874 res_nch->mount = mp;
2876 ++gd->gd_nchstats->ncs_goodhits;
2877 atomic_add_int(&res_nch->mount->mnt_refs, 1);
2879 KKASSERT(ncp->nc_error != EWOULDBLOCK);
2880 return(ncp->nc_error);
2884 * This is a non-blocking version of cache_nlookup() used by
2885 * nfs_readdirplusrpc_uio(). It can fail for any reason and
2886 * will return nch.ncp == NULL in that case.
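 *
 * Illustrative sketch (assumed): since the lookup can fail for any
 * reason, callers simply skip the entry on failure:
 *
 *	nch = cache_nlookup_nonblock(&par_nch, &nlc);
 *	if (nch.ncp == NULL)
 *		return;				(could not lock, give up)
 *	...
 *	cache_put(&nch);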
2889 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
2891 struct nchandle nch;
2892 struct namecache *ncp;
2893 struct namecache *new_ncp;
2894 struct nchash_head *nchpp;
2902 mp = par_nch->mount;
2906 * Try to locate an existing entry
2908 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2909 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2911 nchpp = NCHHASH(hash);
2913 spin_lock(&nchpp->spin);
2914 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2918 * Break out if we find a matching entry. Note that
2919 * UNRESOLVED entries may match, but DESTROYED entries
2922 if (ncp->nc_parent == par_nch->ncp &&
2923 ncp->nc_nlen == nlc->nlc_namelen &&
2924 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2925 (ncp->nc_flag & NCF_DESTROYED) == 0
2928 spin_unlock(&nchpp->spin);
2930 _cache_unlock(par_nch->ncp);
2933 if (_cache_lock_special(ncp) == 0) {
2934 _cache_auto_unresolve(mp, ncp);
2936 _cache_free(new_ncp);
2947 * We failed to locate an entry, create a new entry and add it to
2948 * the cache. The parent ncp must also be locked so we
2951 * We have to relookup after possibly blocking in kmalloc or
2952 * when locking par_nch.
2954 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2955 * mount case, in which case nc_name will be NULL.
2957 if (new_ncp == NULL) {
2958 spin_unlock(&nchpp->spin);
2959 new_ncp = cache_alloc(nlc->nlc_namelen);
2960 if (nlc->nlc_namelen) {
2961 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2963 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2967 if (par_locked == 0) {
2968 spin_unlock(&nchpp->spin);
2969 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
2977 * WARNING! We still hold the spinlock. We have to set the hash
2978 * table entry atomically.
2981 _cache_link_parent(ncp, par_nch->ncp, nchpp);
2982 spin_unlock(&nchpp->spin);
2983 _cache_unlock(par_nch->ncp);
2984 /* par_locked = 0 - not used */
2987 * stats and namecache size management
2989 if (ncp->nc_flag & NCF_UNRESOLVED)
2990 ++gd->gd_nchstats->ncs_miss;
2991 else if (ncp->nc_vp)
2992 ++gd->gd_nchstats->ncs_goodhits;
2994 ++gd->gd_nchstats->ncs_neghits;
2997 atomic_add_int(&nch.mount->mnt_refs, 1);
3001 _cache_free(new_ncp);
3010 * The namecache entry is marked as being used as a mount point.
3011 * Locate the mount if it is visible to the caller. The DragonFly
3012 * mount system allows arbitrary loops in the topology and disentangles
3013 * those loops by matching against (mp, ncp) rather than just (ncp).
3014 * This means any given ncp can dive any number of mounts, depending
3015 * on the relative mount (e.g. nullfs) the caller is at in the topology.
3017 * We use a very simple frontend cache to reduce SMP conflicts,
3018 * which we have to do because the mountlist scan needs an exclusive
3019 * lock around its ripout info list. Not to mention that there might
3020 * be a lot of mounts.
3022 struct findmount_info {
3023 struct mount *result;
3024 struct mount *nch_mount;
3025 struct namecache *nch_ncp;
3029 struct ncmount_cache *
3030 ncmount_cache_lookup(struct mount *mp, struct namecache *ncp)
3034 hash = ((int)(intptr_t)mp / sizeof(*mp)) ^
3035 ((int)(intptr_t)ncp / sizeof(*ncp));
3036 hash = (hash & 0x7FFFFFFF) % NCMOUNT_NUMCACHE;
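	/*
	 * Hash rationale: dividing each pointer by its structure size
	 * strips the low alignment bits so the XOR mixes meaningful
	 * bits of (mp, ncp); masking to 31 bits keeps the intermediate
	 * value non-negative for the modulo above.
	 */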
3037 return (&ncmount_cache[hash]);
3042 cache_findmount_callback(struct mount *mp, void *data)
3044 struct findmount_info *info = data;
3047 * Check the mount's mounted-on point against the passed nch.
3049 if (mp->mnt_ncmounton.mount == info->nch_mount &&
3050 mp->mnt_ncmounton.ncp == info->nch_ncp
3053 atomic_add_int(&mp->mnt_refs, 1);
3060 cache_findmount(struct nchandle *nch)
3062 struct findmount_info info;
3063 struct ncmount_cache *ncc;
3069 if (ncmount_cache_enable == 0) {
3073 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3074 if (ncc->ncp == nch->ncp) {
3075 spin_lock_shared(&ncc->spin);
3076 if (ncc->isneg == 0 &&
3077 ncc->ncp == nch->ncp && (mp = ncc->mp) != NULL) {
3078 if (mp->mnt_ncmounton.mount == nch->mount &&
3079 mp->mnt_ncmounton.ncp == nch->ncp) {
3081 * Cache hit (positive)
3083 atomic_add_int(&mp->mnt_refs, 1);
3084 spin_unlock_shared(&ncc->spin);
3085 ++ncmount_cache_hit;
3088 /* else cache miss */
3091 ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3093 * Cache hit (negative)
3095 spin_unlock_shared(&ncc->spin);
3096 ++ncmount_cache_hit;
3099 spin_unlock_shared(&ncc->spin);
3107 info.nch_mount = nch->mount;
3108 info.nch_ncp = nch->ncp;
3109 mountlist_scan(cache_findmount_callback, &info,
3110 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
3115 * Negative lookups: We cache the originating {ncp,mp}. (mp) is
3116 * only used for pointer comparisons and is not
3117 * referenced (otherwise there would be dangling
3120 * Positive lookups: We cache the originating {ncp} and the target
3121 * (mp). (mp) is referenced.
3123 * Indeterminate: If the match is undergoing an unmount we do
3124 * not cache it to avoid racing cache_unmounting(),
3125 * but still return the match.
3128 spin_lock(&ncc->spin);
3129 if (info.result == NULL) {
3130 if (ncc->isneg == 0 && ncc->mp)
3131 atomic_add_int(&ncc->mp->mnt_refs, -1);
3132 ncc->ncp = nch->ncp;
3133 ncc->mp = nch->mount;
3135 spin_unlock(&ncc->spin);
3136 ++ncmount_cache_overwrite;
3137 } else if ((info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0) {
3138 if (ncc->isneg == 0 && ncc->mp)
3139 atomic_add_int(&ncc->mp->mnt_refs, -1);
3140 atomic_add_int(&info.result->mnt_refs, 1);
3141 ncc->ncp = nch->ncp;
3142 ncc->mp = info.result;
3144 spin_unlock(&ncc->spin);
3145 ++ncmount_cache_overwrite;
3147 spin_unlock(&ncc->spin);
3149 ++ncmount_cache_miss;
3151 return(info.result);
3155 cache_dropmount(struct mount *mp)
3157 atomic_add_int(&mp->mnt_refs, -1);
3161 cache_ismounting(struct mount *mp)
3163 struct nchandle *nch = &mp->mnt_ncmounton;
3164 struct ncmount_cache *ncc;
3166 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3168 ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3169 spin_lock(&ncc->spin);
3171 ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3175 spin_unlock(&ncc->spin);
3180 cache_unmounting(struct mount *mp)
3182 struct nchandle *nch = &mp->mnt_ncmounton;
3183 struct ncmount_cache *ncc;
3185 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3186 if (ncc->isneg == 0 &&
3187 ncc->ncp == nch->ncp && ncc->mp == mp) {
3188 spin_lock(&ncc->spin);
3189 if (ncc->isneg == 0 &&
3190 ncc->ncp == nch->ncp && ncc->mp == mp) {
3191 atomic_add_int(&mp->mnt_refs, -1);
3195 spin_unlock(&ncc->spin);
3200 * Resolve an unresolved namecache entry, generally by looking it up.
3201 * The passed ncp must be locked and refd.
3203 * Theoretically since a vnode cannot be recycled while held, and since
3204 * the nc_parent chain holds its vnode as long as children exist, the
3205 * direct parent of the cache entry we are trying to resolve should
3206 * have a valid vnode. If not then generate an error that we can
3207 * determine is related to a resolver bug.
3209 * However, if a vnode was in the middle of being recycled when the NCP
3210 * got locked, ncp->nc_vp might point to a vnode that is about to become
3211 * invalid. cache_resolve() handles this case by unresolving the entry
3212 * and then re-resolving it.
3214 * Note that successful resolution does not necessarily return an error
3215 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
3219 cache_resolve(struct nchandle *nch, struct ucred *cred)
3221 struct namecache *par_tmp;
3222 struct namecache *par;
3223 struct namecache *ncp;
3224 struct nchandle nctmp;
3231 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
3234 * If the ncp is already resolved we have nothing to do. However,
3235 * we do want to guarantee that a usable vnode is returned when
3236 * a vnode is present, so make sure it hasn't been reclaimed.
3238 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
3239 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
3240 _cache_setunresolved(ncp);
3241 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
3242 return (ncp->nc_error);
3246 * If the ncp was destroyed it will never resolve again. This
3247 * can basically only happen when someone is chdir'd into an
3248 * empty directory which is then rmdir'd. We want to catch this
3249 * here and not dive the VFS because the VFS might actually
3250 * have a way to re-resolve the disconnected ncp, which will
3251 * result in inconsistencies in the cdir/nch for proc->p_fd.
3253 if (ncp->nc_flag & NCF_DESTROYED) {
3254 kprintf("Warning: cache_resolve: ncp '%s' was unlinked\n",
3260 * Mount points need special handling because the parent does not
3261 * belong to the same filesystem as the ncp.
3263 if (ncp == mp->mnt_ncmountpt.ncp)
3264 return (cache_resolve_mp(mp));
3267 * We expect an unbroken chain of ncps to at least the mount point,
3268 * and even all the way to root (but this code doesn't have to go
3269 * past the mount point).
3271 if (ncp->nc_parent == NULL) {
3272 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
3273 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
3274 ncp->nc_error = EXDEV;
3275 return(ncp->nc_error);
3279 * The vp's of the parent directories in the chain are held via vhold()
3280 * due to the existence of the child, and should not disappear.
3281 * However, there are cases where they can disappear:
3283 * - due to filesystem I/O errors.
3284 * - due to NFS being stupid about tracking the namespace and
3285 * destroying the namespace for entire directories quite often.
3286 * - due to forced unmounts.
3287 * - due to an rmdir (parent will be marked DESTROYED)
3289 * When this occurs we have to track the chain backwards and resolve
3290 * it, looping until the resolver catches up to the current node. We
3291 * could recurse here but we might run ourselves out of kernel stack
3292 * so we do it in a more painful manner. This situation really should
3293 * not occur all that often, and when it does it should not have to go
3294 * back too many nodes to resolve the ncp.
3296 while ((dvp = cache_dvpref(ncp)) == NULL) {
3298 * This case can occur if a process is CD'd into a
3299 * directory which is then rmdir'd. If the parent is marked
3300 * destroyed there is no point trying to resolve it.
3302 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
3304 par = ncp->nc_parent;
3307 while ((par_tmp = par->nc_parent) != NULL &&
3308 par_tmp->nc_vp == NULL) {
3309 _cache_hold(par_tmp);
3310 _cache_lock(par_tmp);
3314 if (par->nc_parent == NULL) {
3315 kprintf("EXDEV case 2 %*.*s\n",
3316 par->nc_nlen, par->nc_nlen, par->nc_name);
3320 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
3321 par->nc_nlen, par->nc_nlen, par->nc_name);
3323 * The parent is not set in stone; ref and lock it to prevent
3324 * it from disappearing. Also note that due to renames it
3325 * is possible for our ncp to move and for par to no longer
3326 * be one of its parents. We resolve it anyway, the loop
3327 * will handle any moves.
3329 _cache_get(par); /* additional hold/lock */
3330 _cache_put(par); /* from earlier hold/lock */
3331 if (par == nch->mount->mnt_ncmountpt.ncp) {
3332 cache_resolve_mp(nch->mount);
3333 } else if ((dvp = cache_dvpref(par)) == NULL) {
3334 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
3338 if (par->nc_flag & NCF_UNRESOLVED) {
3341 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
3345 if ((error = par->nc_error) != 0) {
3346 if (par->nc_error != EAGAIN) {
3347 kprintf("EXDEV case 3 %*.*s error %d\n",
3348 par->nc_nlen, par->nc_nlen, par->nc_name,
3353 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
3354 par, par->nc_nlen, par->nc_nlen, par->nc_name);
3361 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
3362 * ncp's and reattach them. If this occurs the original ncp is marked
3363 * EAGAIN to force a relookup.
3365 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
3366 * ncp must already be resolved.
3371 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
3374 ncp->nc_error = EPERM;
3376 if (ncp->nc_error == EAGAIN) {
3377 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
3378 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
3381 return(ncp->nc_error);
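/*
 * Illustrative caller sketch for cache_resolve() (assumed, not part of
 * this file): a negative hit resolves "successfully" but reports ENOENT
 * via nc_error instead of leaving the entry unresolved:
 *
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(&nch, cred);
 *	else
 *		error = nch.ncp->nc_error;
 *	if (error == 0)
 *		...nch.ncp->nc_vp is a usable vnode...
 *	else if (error == ENOENT)
 *		...negative hit, the name does not exist...
 */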
3385 * Resolve the ncp associated with a mount point. Such ncp's almost always
3386 * remain resolved and this routine is rarely called. NFS MPs tend to force
3387 * re-resolution more often due to their Mack-truck-smash-the-namecache
3388 * method of tracking namespace changes.
3390 * The semantics of this call are that the passed ncp must be locked on
3391 * entry and will be locked on return. However, if we actually have to
3392 * resolve the mount point we temporarily unlock the entry in order to
3393 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
3394 * the unlock we have to recheck the flags after we relock.
3397 cache_resolve_mp(struct mount *mp)
3399 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
3403 KKASSERT(mp != NULL);
3406 * If the ncp is already resolved we have nothing to do. However,
3407 * we do want to guarantee that a usable vnode is returned when
3408 * a vnode is present, so make sure it hasn't been reclaimed.
3410 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
3411 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
3412 _cache_setunresolved(ncp);
3415 if (ncp->nc_flag & NCF_UNRESOLVED) {
3417 while (vfs_busy(mp, 0))
3419 error = VFS_ROOT(mp, &vp);
3423 * recheck the ncp state after relocking.
3425 if (ncp->nc_flag & NCF_UNRESOLVED) {
3426 ncp->nc_error = error;
3428 _cache_setvp(mp, ncp, vp);
3431 kprintf("[diagnostic] cache_resolve_mp: failed"
3432 " to resolve mount %p err=%d ncp=%p\n",
3434 _cache_setvp(mp, ncp, NULL);
3436 } else if (error == 0) {
3441 return(ncp->nc_error);
3445 * Clean out negative cache entries when too many have accumulated.
3448 _cache_cleanneg(int count)
3450 struct namecache *ncp;
3453 * Attempt to clean out the specified number of negative cache
3458 ncp = TAILQ_FIRST(&ncneglist);
3460 spin_unlock(&ncspin);
3463 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
3464 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
3466 spin_unlock(&ncspin);
3469 * This can race, so we must re-check that the ncp
3470 * is on the ncneglist after successfully locking it.
3472 if (_cache_lock_special(ncp) == 0) {
3473 if (ncp->nc_vp == NULL &&
3474 (ncp->nc_flag & NCF_UNRESOLVED) == 0) {
3475 ncp = cache_zap(ncp, 1);
3479 kprintf("cache_cleanneg: race avoided\n");
3490 * Clean out positive cache entries when too many have accumulated.
3493 _cache_cleanpos(int count)
3495 static volatile int rover;
3496 struct nchash_head *nchpp;
3497 struct namecache *ncp;
3501 * Attempt to clean out the specified number of positive cache
3505 rover_copy = ++rover; /* MPSAFEENOUGH */
3507 nchpp = NCHHASH(rover_copy);
3509 spin_lock_shared(&nchpp->spin);
3510 ncp = LIST_FIRST(&nchpp->list);
3511 while (ncp && (ncp->nc_flag & NCF_DESTROYED))
3512 ncp = LIST_NEXT(ncp, nc_hash);
3515 spin_unlock_shared(&nchpp->spin);
3518 if (_cache_lock_special(ncp) == 0) {
3519 ncp = cache_zap(ncp, 1);
3531 * This is a kitchen sink function to clean out ncps which we
3532 * tried to zap from cache_drop() but failed because we were
3533 * unable to acquire the parent lock.
3535 * Such entries can also be removed via cache_inval_vp(), such
3536 * as when unmounting.
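 *
 * Implementation note: the scan below threads a dummy NCF_DESTROYED
 * entry through each hash chain as a cursor, so the per-bucket spinlock
 * can be dropped while an individual candidate is locked and cleaned,
 * and the scan then resumes from the dummy.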
3539 _cache_cleandefered(void)
3541 struct nchash_head *nchpp;
3542 struct namecache *ncp;
3543 struct namecache dummy;
3547 bzero(&dummy, sizeof(dummy));
3548 dummy.nc_flag = NCF_DESTROYED;
3551 for (i = 0; i <= nchash; ++i) {
3552 nchpp = &nchashtbl[i];
3554 spin_lock(&nchpp->spin);
3555 LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
3557 while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
3558 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
3560 LIST_REMOVE(&dummy, nc_hash);
3561 LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
3563 spin_unlock(&nchpp->spin);
3564 if (_cache_lock_nonblock(ncp) == 0) {
3565 ncp->nc_flag &= ~NCF_DEFEREDZAP;
3569 spin_lock(&nchpp->spin);
3572 LIST_REMOVE(&dummy, nc_hash);
3573 spin_unlock(&nchpp->spin);
3578 * Name cache initialization, from vfsinit() when we are booting
3586 /* initialise per-cpu namecache effectiveness statistics. */
3587 for (i = 0; i < ncpus; ++i) {
3588 gd = globaldata_find(i);
3589 gd->gd_nchstats = &nchstats[i];
3591 TAILQ_INIT(&ncneglist);
3593 nchashtbl = hashinit_ext(desiredvnodes / 2,
3594 sizeof(struct nchash_head),
3595 M_VFSCACHE, &nchash);
3596 for (i = 0; i <= (int)nchash; ++i) {
3597 LIST_INIT(&nchashtbl[i].list);
3598 spin_init(&nchashtbl[i].spin);
3600 for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
3601 spin_init(&ncmount_cache[i].spin);
3602 nclockwarn = 5 * hz;
3606 * Called from start_init() to bootstrap the root filesystem. Returns
3607 * a referenced, unlocked namecache record.
3610 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
3612 nch->ncp = cache_alloc(0);
3614 atomic_add_int(&mp->mnt_refs, 1);
3616 _cache_setvp(nch->mount, nch->ncp, vp);
3620 * vfs_cache_setroot()
3622 * Create an association between the root of our namecache and
3623 * the root vnode. This routine may be called several times during
3626 * If the caller intends to save the returned namecache pointer somewhere
3627 * it must cache_hold() it.
3630 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
3633 struct nchandle onch;
3641 cache_zero(&rootnch);
3649 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
3650 * topology and is being removed as quickly as possible. The new VOP_N*()
3651 * API calls are required to make specific adjustments using the supplied
3652 * ncp pointers rather than just bogusly purging random vnodes.
3654 * Invalidate all namecache entries to a particular vnode as well as
3655 * any direct children of that vnode in the namecache. This is a
3656 * 'catch all' purge used by filesystems that do not know any better.
3658 * Note that the linkage between the vnode and its namecache entries will
3659 * be removed, but the namecache entries themselves might stay put due to
3660 * active references from elsewhere in the system or due to the existence of
3661 * the children. The namecache topology is left intact even if we do not
3662 * know what the vnode association is. Such entries will be marked
3666 cache_purge(struct vnode *vp)
3668 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
3672 * Flush all entries referencing a particular filesystem.
3674 * Since we need to check it anyway, we will flush all the invalid
3675 * entries at the same time.
3680 cache_purgevfs(struct mount *mp)
3682 struct nchash_head *nchpp;
3683 struct namecache *ncp, *nnp;
3686 * Scan hash tables for applicable entries.
3688 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
3689 spin_lock_wr(&nchpp->spin); /* XXX */
3690 ncp = LIST_FIRST(&nchpp->list);
3694 nnp = LIST_NEXT(ncp, nc_hash);
3697 if (ncp->nc_mount == mp) {
3699 ncp = cache_zap(ncp, 0);
3707 spin_unlock_wr(&nchpp->spin); /* XXX */
3713 static int disablecwd;
3714 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
3717 static u_long numcwdcalls;
3718 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
3719 "Number of current directory resolution calls");
3720 static u_long numcwdfailnf;
3721 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
3722 "Number of current directory failures due to lack of file");
3723 static u_long numcwdfailsz;
3724 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
3725 "Number of current directory failures due to large result");
3726 static u_long numcwdfound;
3727 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
3728 "Number of current directory resolution successes");
3734 sys___getcwd(struct __getcwd_args *uap)
3744 buflen = uap->buflen;
3747 if (buflen > MAXPATHLEN)
3748 buflen = MAXPATHLEN;
3750 buf = kmalloc(buflen, M_TEMP, M_WAITOK);
3751 bp = kern_getcwd(buf, buflen, &error);
3753 error = copyout(bp, uap->buf, strlen(bp) + 1);
3759 kern_getcwd(char *buf, size_t buflen, int *error)
3761 struct proc *p = curproc;
3763 int i, slash_prefixed;
3764 struct filedesc *fdp;
3765 struct nchandle nch;
3766 struct namecache *ncp;
3775 nch = fdp->fd_ncdir;
3780 while (ncp && (ncp != fdp->fd_nrdir.ncp ||
3781 nch.mount != fdp->fd_nrdir.mount)
3784 * While traversing upwards if we encounter the root
3785 * of the current mount we have to skip to the mount point
3786 * in the underlying filesystem.
3788 if (ncp == nch.mount->mnt_ncmountpt.ncp) {
3789 nch = nch.mount->mnt_ncmounton;
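/*
 * Example (illustrative): with /usr null-mounted on /jail/usr, walking
 * up from a cwd inside the nullfs mount hits the nullfs root here and
 * continues from the /jail/usr ncp of the underlying filesystem.
 */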
3798 * Prepend the path segment
3800 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3807 *--bp = ncp->nc_name[i];
3819 * Go up a directory. This isn't a mount point so we don't
3820 * have to check again.
3822 while ((nch.ncp = ncp->nc_parent) != NULL) {
3823 if (ncp_shared_lock_disable)
3826 _cache_lock_shared(ncp);
3827 if (nch.ncp != ncp->nc_parent) {
3831 _cache_hold(nch.ncp);
3844 if (!slash_prefixed) {
3862 * Thus begins the fullpath magic.
3864 * The passed nchp is referenced but not locked.
3866 static int disablefullpath;
3867 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
3868 &disablefullpath, 0,
3869 "Disable fullpath lookups");
3871 static u_int numfullpathcalls;
3872 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathcalls, CTLFLAG_RD,
3873 &numfullpathcalls, 0,
3874 "Number of full path resolutions in progress");
3875 static u_int numfullpathfailnf;
3876 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailnf, CTLFLAG_RD,
3877 &numfullpathfailnf, 0,
3878 "Number of full path resolution failures due to lack of file");
3879 static u_int numfullpathfailsz;
3880 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailsz, CTLFLAG_RD,
3881 &numfullpathfailsz, 0,
3882 "Number of full path resolution failures due to insufficient memory");
3883 static u_int numfullpathfound;
3884 SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfound, CTLFLAG_RD,
3885 &numfullpathfound, 0,
3886 "Number of full path resolution successes");
3889 cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
3890 char **retbuf, char **freebuf, int guess)
3892 struct nchandle fd_nrdir;
3893 struct nchandle nch;
3894 struct namecache *ncp;
3895 struct mount *mp, *new_mp;
3901 atomic_add_int(&numfullpathcalls, -1);
3906 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3907 bp = buf + MAXPATHLEN - 1;
3910 fd_nrdir = *nchbase;
3912 fd_nrdir = p->p_fd->fd_nrdir;
3922 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
3926 * If we are asked to guess the upwards path, we do so whenever
3927 * we encounter an ncp marked as a mountpoint. We try to find
3928 * the actual mountpoint by finding the mountpoint with this
3931 if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
3932 new_mp = mount_get_by_nc(ncp);
3935 * While traversing upwards if we encounter the root
3936 * of the current mount we have to skip to the mount point.
3938 if (ncp == mp->mnt_ncmountpt.ncp) {
3942 nch = new_mp->mnt_ncmounton;
3952 * Prepend the path segment
3954 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3956 numfullpathfailsz++;
3961 *--bp = ncp->nc_name[i];
3964 numfullpathfailsz++;
3973 * Go up a directory. This isn't a mount point so we don't
3974 * have to check again.
3976 * We can only safely access nc_parent with ncp held locked.
3978 while ((nch.ncp = ncp->nc_parent) != NULL) {
3980 if (nch.ncp != ncp->nc_parent) {
3984 _cache_hold(nch.ncp);
3992 numfullpathfailnf++;
3998 if (!slash_prefixed) {
4000 numfullpathfailsz++;
4018 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
4019 char **freebuf, int guess)
4021 struct namecache *ncp;
4022 struct nchandle nch;
4026 atomic_add_int(&numfullpathcalls, 1);
4027 if (disablefullpath)
4033 /* vn is NULL, client wants us to use p->p_textvp */
4035 if ((vn = p->p_textvp) == NULL)
4038 spin_lock_shared(&vn->v_spin);
4039 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
4044 spin_unlock_shared(&vn->v_spin);
4048 spin_unlock_shared(&vn->v_spin);
4050 atomic_add_int(&numfullpathcalls, -1);
4052 nch.mount = vn->v_mount;
4053 error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);