2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 #include <sys/param.h>
66 #include <sys/systm.h>
68 #include <sys/kernel.h>
69 #include <sys/sysctl.h>
70 #include <sys/mount.h>
71 #include <sys/vnode.h>
72 #include <sys/malloc.h>
73 #include <sys/sysproto.h>
74 #include <sys/spinlock.h>
76 #include <sys/namei.h>
77 #include <sys/nlookup.h>
78 #include <sys/filedesc.h>
79 #include <sys/fnv_hash.h>
80 #include <sys/globaldata.h>
81 #include <sys/kern_syscall.h>
82 #include <sys/dirent.h>
85 #include <sys/spinlock2.h>
87 #define MAX_RECURSION_DEPTH 64
90 * Random lookups in the cache are accomplished with a hash table using
91 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock.
93 * Negative entries may exist and correspond to resolved namecache
94 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT
95 * will be set if the entry corresponds to a whited-out directory entry
96 * (versus simply not finding the entry at all). pcpu_ncache[n].neg_list
97 * is locked via pcpu_ncache[n].neg_spin.
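/*
 * Illustrative sketch (not part of the original source): a component is
 * located by hashing its name together with the parent ncp pointer and
 * indexing the per-chain spinlocked hash table, mirroring the relinking
 * code in cache_rename() below.  The name/namelen/par variables here are
 * assumed for illustration.
 *
 *	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);	(par = parent ncp)
 *	nchpp = NCHHASH(hash);
 *	spin_lock(&nchpp->spin);
 *	(walk nchpp->list comparing nc_parent, nc_nlen, and nc_name)
 *	spin_unlock(&nchpp->spin);
 */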
101 * (1) A ncp must be referenced before it can be locked.
103 * (2) A ncp must be locked in order to modify it.
105 * (3) ncp locks are always ordered child -> parent. That may seem
106 * backwards but forward scans use the hash table and thus can hold
107 * the parent unlocked when traversing downward.
109 * This allows insert/rename/delete/dot-dot and other operations
110 * to use ncp->nc_parent links.
112 * This also prevents a locked-up node (e.g. an NFS node) from creating a
113 * chain reaction all the way back to the root vnode / namecache.
115 * (4) parent linkages require both the parent and child to be locked.
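/*
 * Illustrative sketch (not part of the original source) of rule (3):
 * an operation needing both a child and its parent locks the child first
 * and only then the parent, which is what _cache_unlink_parent() does
 * below.
 *
 *	_cache_hold(ncp);
 *	_cache_lock(ncp);		(child first)
 *	par = ncp->nc_parent;
 *	_cache_hold(par);
 *	_cache_lock(par);		(then parent)
 *	(modify the linkage)
 *	_cache_put(par);
 *	_cache_put(ncp);
 */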
119 * Structures associated with name caching.
121 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
124 #define NCMOUNT_NUMCACHE 16301 /* prime number */
126 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
128 TAILQ_HEAD(nchash_list, namecache);
131 * Don't cachealign, but at least pad to 32 bytes so entries
132 * don't cross a cache line.
135 struct nchash_list list; /* 16 bytes */
136 struct spinlock spin; /* 8 bytes */
137 long pad01; /* 8 bytes */
140 struct ncmount_cache {
141 struct spinlock spin;
142 struct namecache *ncp;
144 int isneg; /* if != 0 mp is originator and not target */
148 struct spinlock neg_spin; /* for neg_list and neg_count */
149 struct namecache_list neg_list;
157 __read_mostly static struct nchash_head *nchashtbl;
158 __read_mostly static struct pcpu_ncache *pcpu_ncache;
159 static struct ncmount_cache ncmount_cache[NCMOUNT_NUMCACHE];
162 * ncvp_debug - debug cache_fromdvp(). This is used by the NFS server
163 * to create the namecache infrastructure leading to a dangling vnode.
165 * 0 Only errors are reported
166 * 1 Successes are reported
167 * 2 Successes + the whole directory scan is reported
168 * 3 Force the directory scan code to run as if the parent vnode did not
169 * have a namecache record, even if it does have one.
171 __read_mostly static int ncvp_debug;
172 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
173 "Namecache debug level (0-3)");
175 __read_mostly static u_long nchash; /* size of hash table */
176 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
177 "Size of namecache hash table");
179 __read_mostly static int ncnegflush = 10; /* burst for negative flush */
180 SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
181 "Batch flush negative entries");
183 __read_mostly static int ncposflush = 10; /* burst for positive flush */
184 SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
185 "Batch flush positive entries");
187 __read_mostly static int ncnegfactor = 16; /* ratio of negative entries */
188 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
189 "Ratio of namecache negative entries");
191 __read_mostly static int nclockwarn; /* warn on locked entries in ticks */
192 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
193 "Warn on locked namecache entries in ticks");
195 __read_mostly static int ncposlimit; /* limit on positive entries */
196 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
197 "Limit on the number of positive namecache entries");
199 __read_mostly static int ncp_shared_lock_disable = 0;
200 SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
201 &ncp_shared_lock_disable, 0, "Disable shared namecache locks");
203 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
204 "sizeof(struct vnode)");
205 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
206 "sizeof(struct namecache)");
208 __read_mostly static int ncmount_cache_enable = 1;
209 SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
210 &ncmount_cache_enable, 0, "mount point cache");
212 static __inline void _cache_drop(struct namecache *ncp);
213 static int cache_resolve_mp(struct mount *mp);
214 static struct vnode *cache_dvpref(struct namecache *ncp);
215 static void _cache_lock(struct namecache *ncp);
216 static void _cache_setunresolved(struct namecache *ncp);
217 static void _cache_cleanneg(long count);
218 static void _cache_cleanpos(long count);
219 static void _cache_cleandefered(void);
220 static void _cache_unlink(struct namecache *ncp);
222 static void vfscache_rollup_all(void);
226 * The new name cache statistics (these are rolled up globals and not
227 * modified in the critical path, see struct pcpu_ncache).
229 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
230 static long vfscache_negs;
231 SYSCTL_LONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &vfscache_negs, 0,
232 "Number of negative namecache entries");
233 static long vfscache_count;
234 SYSCTL_LONG(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &vfscache_count, 0,
235 "Number of namecaches entries");
236 static long vfscache_leafs;
237 SYSCTL_LONG(_vfs_cache, OID_AUTO, numleafs, CTLFLAG_RD, &vfscache_leafs, 0,
238 "Number of namecaches entries");
239 static long numdefered;
240 SYSCTL_LONG(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
241 "Number of cache entries allocated");
244 struct nchstats nchstats[SMP_MAXCPU];
246 * Export VFS cache effectiveness statistics to user-land.
248 * The statistics are left for aggregation to user-land so
249 * neat things can be achieved, like observing per-CPU cache
253 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
255 struct globaldata *gd;
259 for (i = 0; i < ncpus; ++i) {
260 gd = globaldata_find(i);
261 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
262 sizeof(struct nchstats))))
268 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
269 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
271 static struct namecache *cache_zap(struct namecache *ncp, int nonblock);
274 * Cache mount points and namecache records in order to avoid unnecessary
275 * atomic ops on mnt_refs and ncp->refs. This improves concurrent SMP
276 * performance and is particularly important on multi-socket systems to
277 * reduce cache-line ping-ponging.
279 * Try to keep the pcpu structure within one cache line (~64 bytes).
281 #define MNTCACHE_COUNT 5
284 struct mount *mntary[MNTCACHE_COUNT];
285 struct namecache *ncp1;
286 struct namecache *ncp2;
287 struct nchandle ncdir;
292 static struct mntcache pcpu_mntcache[MAXCPU];
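/*
 * Illustrative sketch (not part of the original source): a dropped
 * reference can be parked in this per-cpu cache and a later consumer can
 * steal it back with a localized cmpset instead of touching the global
 * ref counts.  This is what cache_drop_and_cache() and cache_copy()
 * below do for e.g. the system root and jail root nchandles (rootnch
 * assumed to be the usual global here).
 *
 *	cache_copy(&rootnch, &nch);	(may reuse a parked ref)
 *	(use nch)
 *	cache_drop_and_cache(&nch);	(parks the ref per-cpu)
 */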
296 _cache_mntref(struct mount *mp)
298 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
301 for (i = 0; i < MNTCACHE_COUNT; ++i) {
302 if (cache->mntary[i] != mp)
304 if (atomic_cmpset_ptr((void *)&cache->mntary[i], mp, NULL))
307 atomic_add_int(&mp->mnt_refs, 1);
312 _cache_mntrel(struct mount *mp)
314 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
317 for (i = 0; i < MNTCACHE_COUNT; ++i) {
318 if (cache->mntary[i] == NULL) {
319 mp = atomic_swap_ptr((void *)&cache->mntary[i], mp);
324 i = (int)((uint32_t)++cache->iter % (uint32_t)MNTCACHE_COUNT);
325 mp = atomic_swap_ptr((void *)&cache->mntary[i], mp);
327 atomic_add_int(&mp->mnt_refs, -1);
331 * Clears all cached mount points on all cpus. This routine should only
332 * be called when we are waiting for a mount to clear, e.g. so we can
336 cache_clearmntcache(void)
340 for (n = 0; n < ncpus; ++n) {
341 struct mntcache *cache = &pcpu_mntcache[n];
342 struct namecache *ncp;
346 for (i = 0; i < MNTCACHE_COUNT; ++i) {
347 if (cache->mntary[i]) {
348 mp = atomic_swap_ptr(
349 (void *)&cache->mntary[i], NULL);
351 atomic_add_int(&mp->mnt_refs, -1);
355 ncp = atomic_swap_ptr((void *)&cache->ncp1, NULL);
360 ncp = atomic_swap_ptr((void *)&cache->ncp2, NULL);
364 if (cache->ncdir.ncp) {
365 ncp = atomic_swap_ptr((void *)&cache->ncdir.ncp, NULL);
369 if (cache->ncdir.mount) {
370 mp = atomic_swap_ptr((void *)&cache->ncdir.mount, NULL);
372 atomic_add_int(&mp->mnt_refs, -1);
379 * Namespace locking. The caller must already hold a reference to the
380 * namecache structure in order to lock/unlock it. This function prevents
381 * the namespace from being created or destroyed by accessors other than
384 * Note that holding a locked namecache structure prevents other threads
385 * from making namespace changes (e.g. deleting or creating), prevents
386 * vnode association state changes by other threads, and prevents the
387 * namecache entry from being resolved or unresolved by other threads.
389 * An exclusive lock owner has full authority to associate/disassociate
390 * vnodes and resolve/unresolve the locked ncp.
392 * A shared lock owner only has authority to acquire the underlying vnode,
395 * The primary lock field is nc_lockstatus. nc_locktd is set after the
396 * fact (when locking) or cleared prior to unlocking.
398 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
399 * or recycled, but it does NOT help you if the vnode had already
400 * initiated a recyclement. If this is important, use cache_get()
401 * rather than cache_lock() (and deal with the differences in the
402 * way the refs counter is handled). Or, alternatively, make an
403 * unconditional call to cache_validate() or cache_resolve()
404 * after cache_lock() returns.
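/*
 * Illustrative sketch (not part of the original source): the canonical
 * caller pattern for an already-referenced nchandle (e.g. one returned
 * by a copy routine):
 *
 *	cache_lock(&nch);			(rule (1): already ref'd)
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(&nch, cred);
 *	(operate on the resolved, locked entry)
 *	cache_put(&nch);			(unlock and drop in one call)
 */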
408 _cache_lock(struct namecache *ncp)
416 KKASSERT(ncp->nc_refs != 0);
422 count = ncp->nc_lockstatus;
425 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
426 if (atomic_cmpset_int(&ncp->nc_lockstatus,
429 * The vp associated with a locked ncp must
430 * be held to prevent it from being recycled.
432 * WARNING! If VRECLAIMED is set the vnode
433 * could already be in the middle of a recycle.
434 * Callers must use cache_vref() or
435 * cache_vget() on the locked ncp to
436 * validate the vp or set the cache entry
439 * NOTE! vhold() is allowed if we hold a
440 * lock on the ncp (which we do).
450 if (ncp->nc_locktd == td) {
451 KKASSERT((count & NC_SHLOCK_FLAG) == 0);
452 if (atomic_cmpset_int(&ncp->nc_lockstatus,
459 tsleep_interlock(&ncp->nc_locktd, 0);
460 if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
461 count | NC_EXLOCK_REQ) == 0) {
467 error = tsleep(&ncp->nc_locktd, PINTERLOCKED,
468 "clock", nclockwarn);
469 if (error == EWOULDBLOCK) {
472 kprintf("[diagnostic] cache_lock: "
473 "%s blocked on %p %08x",
474 td->td_comm, ncp, count);
475 kprintf(" \"%*.*s\"\n",
476 ncp->nc_nlen, ncp->nc_nlen,
483 kprintf("[diagnostic] cache_lock: %s unblocked %*.*s after "
486 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
487 (int)(ticks + (hz / 2) - begticks) / hz);
492 * The shared lock works similarly to the exclusive lock except
493 * nc_locktd is left NULL and we need an interlock (VHOLD) to
494 * prevent vhold() races, since the moment our cmpset_int succeeds
495 * another cpu can come in and get its own shared lock.
497 * A critical section is needed to prevent interruption during the
502 _cache_lock_shared(struct namecache *ncp)
507 u_int optreq = NC_EXLOCK_REQ;
509 KKASSERT(ncp->nc_refs != 0);
513 count = ncp->nc_lockstatus;
516 if ((count & ~NC_SHLOCK_REQ) == 0) {
518 if (atomic_cmpset_int(&ncp->nc_lockstatus,
520 (count + 1) | NC_SHLOCK_FLAG |
523 * The vp associated with a locked ncp must
524 * be held to prevent it from being recycled.
526 * WARNING! If VRECLAIMED is set the vnode
527 * could already be in the middle of a recycle.
528 * Callers must use cache_vref() or
529 * cache_vget() on the locked ncp to
530 * validate the vp or set the cache entry
533 * NOTE! vhold() is allowed if we hold a
534 * lock on the ncp (which we do).
538 atomic_clear_int(&ncp->nc_lockstatus,
549 * If already held shared we can just bump the count, but
550 * only allow this if nobody is trying to get the lock
551 * exclusively. If we are blocking too long ignore excl
552 * requests (which can race/deadlock us).
554 * VHOLD is a bit of a hack. Even though we successfully
555 * added another shared ref, the cpu that got the first
556 * shared ref might not yet have held the vnode.
558 if ((count & (optreq|NC_SHLOCK_FLAG)) == NC_SHLOCK_FLAG) {
559 KKASSERT((count & ~(NC_EXLOCK_REQ |
561 NC_SHLOCK_FLAG)) > 0);
562 if (atomic_cmpset_int(&ncp->nc_lockstatus,
564 while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
570 tsleep_interlock(ncp, 0);
571 if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
572 count | NC_SHLOCK_REQ) == 0) {
576 error = tsleep(ncp, PINTERLOCKED, "clocksh", nclockwarn);
577 if (error == EWOULDBLOCK) {
580 didwarn = ticks - nclockwarn;
581 kprintf("[diagnostic] cache_lock_shared: "
582 "%s blocked on %p %08x "
584 curthread->td_comm, ncp, count,
585 ncp->nc_nlen, ncp->nc_nlen,
592 kprintf("[diagnostic] cache_lock_shared: "
593 "%s unblocked %*.*s after %d secs\n",
595 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
596 (int)(ticks - didwarn) / hz);
601 * Lock ncp exclusively, return 0 on success.
603 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
604 * such as the case where one of its children is locked.
608 _cache_lock_nonblock(struct namecache *ncp)
616 count = ncp->nc_lockstatus;
618 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
619 if (atomic_cmpset_int(&ncp->nc_lockstatus,
622 * The vp associated with a locked ncp must
623 * be held to prevent it from being recycled.
625 * WARNING! If VRECLAIMED is set the vnode
626 * could already be in the middle of a recycle.
627 * Callers must use cache_vref() or
628 * cache_vget() on the locked ncp to
629 * validate the vp or set the cache entry
632 * NOTE! vhold() is allowed if we hold a
633 * lock on the ncp (which we do).
643 if (ncp->nc_locktd == td) {
644 if (atomic_cmpset_int(&ncp->nc_lockstatus,
657 * The shared lock works similarly to the exclusive lock except
658 * nc_locktd is left NULL and we need an interlock (VHOLD) to
659 * prevent vhold() races, since the moment our cmpset_int succeeds
660 * another cpu can come in and get its own shared lock.
662 * A critical section is needed to prevent interruption during the
667 _cache_lock_shared_nonblock(struct namecache *ncp)
672 count = ncp->nc_lockstatus;
674 if ((count & ~NC_SHLOCK_REQ) == 0) {
676 if (atomic_cmpset_int(&ncp->nc_lockstatus,
678 (count + 1) | NC_SHLOCK_FLAG |
681 * The vp associated with a locked ncp must
682 * be held to prevent it from being recycled.
684 * WARNING! If VRECLAIMED is set the vnode
685 * could already be in the middle of a recycle.
686 * Callers must use cache_vref() or
687 * cache_vget() on the locked ncp to
688 * validate the vp or set the cache entry
691 * NOTE! vhold() is allowed if we hold a
692 * lock on the ncp (which we do).
696 atomic_clear_int(&ncp->nc_lockstatus,
707 * If already held shared we can just bump the count, but
708 * only allow this if nobody is trying to get the lock
711 * VHOLD is a bit of a hack. Even though we successfully
712 * added another shared ref, the cpu that got the first
713 * shared ref might not yet have held the vnode.
715 if ((count & (NC_EXLOCK_REQ|NC_SHLOCK_FLAG)) ==
717 KKASSERT((count & ~(NC_EXLOCK_REQ |
719 NC_SHLOCK_FLAG)) > 0);
720 if (atomic_cmpset_int(&ncp->nc_lockstatus,
722 while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
736 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
738 * nc_locktd must be NULLed out prior to nc_lockstatus getting cleared.
742 _cache_unlock(struct namecache *ncp)
744 thread_t td __debugvar = curthread;
747 struct vnode *dropvp;
749 KKASSERT(ncp->nc_refs >= 0);
750 KKASSERT((ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) > 0);
751 KKASSERT((ncp->nc_lockstatus & NC_SHLOCK_FLAG) || ncp->nc_locktd == td);
753 count = ncp->nc_lockstatus;
757 * Clear nc_locktd prior to the atomic op (excl lock only)
759 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1)
760 ncp->nc_locktd = NULL;
765 ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ|NC_SHLOCK_FLAG)) == 1) {
767 if (count & NC_EXLOCK_REQ)
768 ncount = count & NC_SHLOCK_REQ; /* cnt->0 */
772 if (atomic_cmpset_int(&ncp->nc_lockstatus,
774 if (count & NC_EXLOCK_REQ)
775 wakeup(&ncp->nc_locktd);
776 else if (count & NC_SHLOCK_REQ)
782 KKASSERT((count & NC_SHLOCK_VHOLD) == 0);
783 KKASSERT((count & ~(NC_EXLOCK_REQ |
785 NC_SHLOCK_FLAG)) > 1);
786 if (atomic_cmpset_int(&ncp->nc_lockstatus,
791 count = ncp->nc_lockstatus;
796 * Don't actually drop the vp until we successfully clean out
797 * the lock, otherwise we may race another shared lock.
805 _cache_lockstatus(struct namecache *ncp)
807 if (ncp->nc_locktd == curthread)
808 return(LK_EXCLUSIVE);
809 if (ncp->nc_lockstatus & NC_SHLOCK_FLAG)
815 * cache_hold() and cache_drop() prevent the premature deletion of a
816 * namecache entry but do not prevent operations (such as zapping) on
817 * that namecache entry.
819 * This routine may only be called from outside this source module if
820 * nc_refs is already at least 1.
822 * This is a rare case where callers are allowed to hold a spinlock,
823 * so we can't use one ourselves.
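/*
 * Illustrative sketch (not part of the original source): a hold only
 * pins the structure; modifying, resolving, or zapping it still requires
 * the lock.
 *
 *	_cache_hold(ncp);	(ok even while the caller holds a spinlock)
 *	(...)
 *	_cache_drop(ncp);	(may zap an unresolved, childless entry on 1->0)
 */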
827 _cache_hold(struct namecache *ncp)
829 atomic_add_int(&ncp->nc_refs, 1);
834 * Drop a cache entry, taking care to deal with races.
836 * For potential 1->0 transitions we must hold the ncp lock to safely
837 * test its flags. An unresolved entry with no children must be zapped
840 * The call to cache_zap() itself will handle all remaining races and
841 * will decrement the ncp's refs regardless. If we are resolved or
842 * have children nc_refs can safely be dropped to 0 without having to
845 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
847 * NOTE: cache_zap() may return a non-NULL referenced parent which must
848 * be dropped in a loop.
852 _cache_drop(struct namecache *ncp)
857 KKASSERT(ncp->nc_refs > 0);
861 if (_cache_lock_nonblock(ncp) == 0) {
862 ncp->nc_flag &= ~NCF_DEFEREDZAP;
863 if ((ncp->nc_flag & NCF_UNRESOLVED) &&
864 TAILQ_EMPTY(&ncp->nc_list)) {
865 ncp = cache_zap(ncp, 1);
868 if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
875 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
883 * Link a new namecache entry to its parent and to the hash table. Be
884 * careful to avoid races if vhold() blocks in the future.
886 * Both ncp and par must be referenced and locked.
888 * NOTE: The hash table spinlock is held during this call, we can't do
892 _cache_link_parent(struct namecache *ncp, struct namecache *par,
893 struct nchash_head *nchpp)
895 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
897 KKASSERT(ncp->nc_parent == NULL);
898 ncp->nc_parent = par;
899 ncp->nc_head = nchpp;
902 * Set inheritance flags. Note that the parent flags may be
903 * stale due to getattr potentially not having been run yet
904 * (it gets run during nlookup()'s).
906 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
907 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
908 ncp->nc_flag |= NCF_SF_PNOCACHE;
909 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
910 ncp->nc_flag |= NCF_UF_PCACHE;
913 * Add to hash table and parent, adjust accounting
915 TAILQ_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
916 atomic_add_long(&pn->vfscache_count, 1);
917 if (TAILQ_EMPTY(&ncp->nc_list))
918 atomic_add_long(&pn->vfscache_leafs, 1);
920 if (TAILQ_EMPTY(&par->nc_list)) {
921 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
922 atomic_add_long(&pn->vfscache_leafs, -1);
924 * Any vp associated with an ncp which has children must
925 * be held to prevent it from being recycled.
930 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
935 * Remove the parent and hash associations from a namecache structure.
936 * If this is the last child of the parent the cache_drop(par) will
937 * attempt to recursively zap the parent.
939 * ncp must be locked. This routine will acquire a temporary lock on
940 * the parent as well as the appropriate hash chain.
943 _cache_unlink_parent(struct namecache *ncp)
945 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
946 struct namecache *par;
947 struct vnode *dropvp;
949 if ((par = ncp->nc_parent) != NULL) {
950 KKASSERT(ncp->nc_parent == par);
953 spin_lock(&ncp->nc_head->spin);
956 * Remove from hash table and parent, adjust accounting
958 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash);
959 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
960 atomic_add_long(&pn->vfscache_count, -1);
961 if (TAILQ_EMPTY(&ncp->nc_list))
962 atomic_add_long(&pn->vfscache_leafs, -1);
965 if (TAILQ_EMPTY(&par->nc_list)) {
966 atomic_add_long(&pn->vfscache_leafs, 1);
970 spin_unlock(&ncp->nc_head->spin);
971 ncp->nc_parent = NULL;
977 * We can only safely vdrop with no spinlocks held.
985 * Allocate a new namecache structure. Most of the code does not require
986 * zero-termination of the string but it makes vop_compat_ncreate() easier.
988 static struct namecache *
989 cache_alloc(int nlen)
991 struct namecache *ncp;
993 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
995 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
997 ncp->nc_flag = NCF_UNRESOLVED;
998 ncp->nc_error = ENOTCONN; /* needs to be resolved */
1001 TAILQ_INIT(&ncp->nc_list);
1007 * Can only be called for the case where the ncp has never been
1008 * associated with anything (so no spinlocks are needed).
1011 _cache_free(struct namecache *ncp)
1013 KKASSERT(ncp->nc_refs == 1 && ncp->nc_lockstatus == 1);
1015 kfree(ncp->nc_name, M_VFSCACHE);
1016 kfree(ncp, M_VFSCACHE);
1020 * [re]initialize a nchandle.
1023 cache_zero(struct nchandle *nch)
1030 * Ref and deref a namecache structure.
1032 * The caller must specify a stable ncp pointer, typically meaning the
1033 * ncp is already referenced but this can also occur indirectly through
1034 * e.g. holding a lock on a direct child.
1036 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
1037 * use read spinlocks here.
1040 cache_hold(struct nchandle *nch)
1042 _cache_hold(nch->ncp);
1043 _cache_mntref(nch->mount);
1048 * Create a copy of a namecache handle for an already-referenced
1052 cache_copy(struct nchandle *nch, struct nchandle *target)
1054 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
1055 struct namecache *ncp;
1058 _cache_mntref(target->mount);
1061 if (ncp == cache->ncp1) {
1062 if (atomic_cmpset_ptr((void *)&cache->ncp1, ncp, NULL))
1065 if (ncp == cache->ncp2) {
1066 if (atomic_cmpset_ptr((void *)&cache->ncp2, ncp, NULL))
1074 * Caller wants to copy the current directory, copy it out from our
1075 * pcpu cache if possible (the entire critical path is just two localized
1076 * cmpset ops). If the pcpu cache has a snapshot at all it will be a
1077 * valid one, so we don't have to lock p->p_fd even though we are loading
1080 * This has a limited effect since nlookup must still ref and shlock the
1081 * vnode to check perms. We do avoid the per-proc spin-lock though, which
1082 * can aid threaded programs.
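/*
 * Illustrative sketch (not part of the original source), assuming a
 * relative-path lookup being set up for the current process:
 *
 *	struct nchandle nch;
 *
 *	cache_copy_ncdir(curproc, &nch);	(snapshot of the cwd)
 *	(start the lookup from nch)
 *	cache_drop_ncdir(&nch);			(give the snapshot back)
 */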
1085 cache_copy_ncdir(struct proc *p, struct nchandle *target)
1087 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
1089 *target = p->p_fd->fd_ncdir;
1090 if (target->ncp == cache->ncdir.ncp &&
1091 target->mount == cache->ncdir.mount) {
1092 if (atomic_cmpset_ptr((void *)&cache->ncdir.ncp,
1093 target->ncp, NULL)) {
1094 if (atomic_cmpset_ptr((void *)&cache->ncdir.mount,
1095 target->mount, NULL)) {
1099 _cache_drop(target->ncp);
1102 spin_lock_shared(&p->p_fd->fd_spin);
1103 cache_copy(&p->p_fd->fd_ncdir, target);
1104 spin_unlock_shared(&p->p_fd->fd_spin);
1108 cache_changemount(struct nchandle *nch, struct mount *mp)
1111 _cache_mntrel(nch->mount);
1116 cache_drop(struct nchandle *nch)
1118 _cache_mntrel(nch->mount);
1119 _cache_drop(nch->ncp);
1125 * Drop the nchandle, but try to cache the ref to avoid global atomic
1126 * ops. This is typically done on the system root and jail root nchandles.
1129 cache_drop_and_cache(struct nchandle *nch)
1131 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
1132 struct namecache *ncp;
1134 _cache_mntrel(nch->mount);
1136 if (cache->ncp1 == NULL) {
1137 ncp = atomic_swap_ptr((void *)&cache->ncp1, ncp);
1141 if (cache->ncp2 == NULL) {
1142 ncp = atomic_swap_ptr((void *)&cache->ncp2, ncp);
1146 if (++cache->iter & 1)
1147 ncp = atomic_swap_ptr((void *)&cache->ncp2, ncp);
1149 ncp = atomic_swap_ptr((void *)&cache->ncp1, ncp);
1158 * We are dropping what the caller believes is the current directory,
1159 * so unconditionally store it in our pcpu cache. Anything already in
1160 * the cache will be discarded.
1163 cache_drop_ncdir(struct nchandle *nch)
1165 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
1167 nch->ncp = atomic_swap_ptr((void *)&cache->ncdir.ncp, nch->ncp);
1168 nch->mount = atomic_swap_ptr((void *)&cache->ncdir.mount, nch->mount);
1170 _cache_drop(nch->ncp);
1172 _cache_mntrel(nch->mount);
1178 cache_lockstatus(struct nchandle *nch)
1180 return(_cache_lockstatus(nch->ncp));
1184 cache_lock(struct nchandle *nch)
1186 _cache_lock(nch->ncp);
1190 cache_lock_maybe_shared(struct nchandle *nch, int excl)
1192 struct namecache *ncp = nch->ncp;
1194 if (ncp_shared_lock_disable || excl ||
1195 (ncp->nc_flag & NCF_UNRESOLVED)) {
1198 _cache_lock_shared(ncp);
1199 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1200 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
1212 * Relock nch1 given an unlocked nch1 and a locked nch2. The caller
1213 * is responsible for checking both for validity on return as they
1214 * may have become invalid.
1216 * We have to deal with potential deadlocks here, so we just ping-pong
1217 * the lock until we get it (we will always block somewhere when
1218 * looping so this is not cpu-intensive).
1220 * which = 0 nch1 not locked, nch2 is locked
1221 * which = 1 nch1 is locked, nch2 is not locked
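/*
 * Illustrative sketch (not part of the original source): a caller that
 * already holds nch2 (tnch) locked and also needs nch1 (fnch), which it
 * only has referenced, can recover both locks without deadlocking:
 *
 *	cache_lock(&tnch);
 *	(...)
 *	cache_relock(&fnch, cred, &tnch, cred);
 *	(both are now locked; re-check both for validity)
 */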
1224 cache_relock(struct nchandle *nch1, struct ucred *cred1,
1225 struct nchandle *nch2, struct ucred *cred2)
1233 if (cache_lock_nonblock(nch1) == 0) {
1234 cache_resolve(nch1, cred1);
1239 cache_resolve(nch1, cred1);
1242 if (cache_lock_nonblock(nch2) == 0) {
1243 cache_resolve(nch2, cred2);
1248 cache_resolve(nch2, cred2);
1255 cache_lock_nonblock(struct nchandle *nch)
1257 return(_cache_lock_nonblock(nch->ncp));
1261 cache_unlock(struct nchandle *nch)
1263 _cache_unlock(nch->ncp);
1267 * ref-and-lock, unlock-and-deref functions.
1269 * This function is primarily used by nlookup. Even though cache_lock
1270 * holds the vnode, it is possible that the vnode may have already
1271 * initiated a recyclement.
1273 * We want cache_get() to return a definitively usable vnode or a
1274 * definitively unresolved ncp.
1278 _cache_get(struct namecache *ncp)
1282 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1283 _cache_setunresolved(ncp);
1288 * Attempt to obtain a shared lock on the ncp. A shared lock will only
1289 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is
1290 * valid. Otherwise an exclusive lock will be acquired instead.
1294 _cache_get_maybe_shared(struct namecache *ncp, int excl)
1296 if (ncp_shared_lock_disable || excl ||
1297 (ncp->nc_flag & NCF_UNRESOLVED)) {
1298 return(_cache_get(ncp));
1301 _cache_lock_shared(ncp);
1302 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1303 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
1305 ncp = _cache_get(ncp);
1310 ncp = _cache_get(ncp);
1317 * This is a special form of _cache_lock() which only succeeds if
1318 * it can get a pristine, non-recursive lock. The caller must have
1319 * already ref'd the ncp.
1321 * On success the ncp will be locked, on failure it will not. The
1322 * ref count does not change either way.
1324 * We want _cache_lock_special() (on success) to return a definitively
1325 * usable vnode or a definitively unresolved ncp.
1328 _cache_lock_special(struct namecache *ncp)
1330 if (_cache_lock_nonblock(ncp) == 0) {
1331 if ((ncp->nc_lockstatus &
1332 ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1) {
1333 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1334 _cache_setunresolved(ncp);
1339 return(EWOULDBLOCK);
1343 * This function tries to get a shared lock but will back-off to an exclusive
1346 * (1) Some other thread is trying to obtain an exclusive lock
1347 * (to prevent the exclusive requester from getting livelocked out
1348 * by many shared locks).
1350 * (2) The current thread already owns an exclusive lock (to avoid
1353 * WARNING! On machines with lots of cores we really want to try hard to
1354 * get a shared lock or concurrent path lookups can chain-react
1355 * into a very high-latency exclusive lock.
1358 _cache_lock_shared_special(struct namecache *ncp)
1361 * Only honor a successful shared lock (returning 0) if there is
1362 * no exclusive request pending and the vnode, if present, is not
1363 * in a reclaimed state.
1365 if (_cache_lock_shared_nonblock(ncp) == 0) {
1366 if ((ncp->nc_lockstatus & NC_EXLOCK_REQ) == 0) {
1367 if (ncp->nc_vp == NULL ||
1368 (ncp->nc_vp->v_flag & VRECLAIMED) == 0) {
1373 return(EWOULDBLOCK);
1377 * Non-blocking shared lock failed. If we already own the exclusive
1378 * lock just acquire another exclusive lock (instead of deadlocking).
1379 * Otherwise acquire a shared lock.
1381 if (ncp->nc_locktd == curthread) {
1385 _cache_lock_shared(ncp);
1391 * NOTE: The same nchandle can be passed for both arguments.
1394 cache_get(struct nchandle *nch, struct nchandle *target)
1396 KKASSERT(nch->ncp->nc_refs > 0);
1397 target->mount = nch->mount;
1398 target->ncp = _cache_get(nch->ncp);
1399 _cache_mntref(target->mount);
1403 cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl)
1405 KKASSERT(nch->ncp->nc_refs > 0);
1406 target->mount = nch->mount;
1407 target->ncp = _cache_get_maybe_shared(nch->ncp, excl);
1408 _cache_mntref(target->mount);
1416 _cache_put(struct namecache *ncp)
1426 cache_put(struct nchandle *nch)
1428 _cache_mntrel(nch->mount);
1429 _cache_put(nch->ncp);
1435 * Resolve an unresolved ncp by associating a vnode with it. If the
1436 * vnode is NULL, a negative cache entry is created.
1438 * The ncp should be locked on entry and will remain locked on return.
1442 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
1444 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
1445 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
1449 * Any vp associated with an ncp which has children must
1450 * be held. Any vp associated with a locked ncp must be held.
1452 if (!TAILQ_EMPTY(&ncp->nc_list))
1454 spin_lock(&vp->v_spin);
1456 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
1457 spin_unlock(&vp->v_spin);
1458 if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
1462 * Set auxiliary flags
1464 switch(vp->v_type) {
1466 ncp->nc_flag |= NCF_ISDIR;
1469 ncp->nc_flag |= NCF_ISSYMLINK;
1470 /* XXX cache the contents of the symlink */
1476 /* XXX: this is a hack to work-around the lack of a real pfs vfs
1479 if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
1483 * When creating a negative cache hit we set the
1484 * namecache_gen. A later resolve will clean out the
1485 * negative cache hit if the mount point's namecache_gen
1486 * has changed. Used by devfs, could also be used by
1489 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
1492 ncp->nc_negcpu = mycpu->gd_cpuid;
1493 spin_lock(&pn->neg_spin);
1494 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
1496 spin_unlock(&pn->neg_spin);
1497 atomic_add_long(&pn->vfscache_negs, 1);
1499 ncp->nc_error = ENOENT;
1501 VFS_NCPGEN_SET(mp, ncp);
1503 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
1510 cache_setvp(struct nchandle *nch, struct vnode *vp)
1512 _cache_setvp(nch->mount, nch->ncp, vp);
1519 cache_settimeout(struct nchandle *nch, int nticks)
1521 struct namecache *ncp = nch->ncp;
1523 if ((ncp->nc_timeout = ticks + nticks) == 0)
1524 ncp->nc_timeout = 1;
1528 * Disassociate the vnode or negative-cache association and mark a
1529 * namecache entry as unresolved again. Note that the ncp is still
1530 * left in the hash table and still linked to its parent.
1532 * The ncp should be locked and refd on entry and will remain locked and refd
1535 * This routine is normally never called on a directory containing children.
1536 * However, NFS often does just that in its rename() code as a cop-out to
1537 * avoid complex namespace operations. This disconnects a directory vnode
1538 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
1544 _cache_setunresolved(struct namecache *ncp)
1548 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1549 ncp->nc_flag |= NCF_UNRESOLVED;
1550 ncp->nc_timeout = 0;
1551 ncp->nc_error = ENOTCONN;
1552 if ((vp = ncp->nc_vp) != NULL) {
1553 spin_lock(&vp->v_spin);
1555 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
1556 spin_unlock(&vp->v_spin);
1559 * Any vp associated with an ncp with children is
1560 * held by that ncp. Any vp associated with a locked
1561 * ncp is held by that ncp. These conditions must be
1562 * undone when the vp is cleared out from the ncp.
1564 if (!TAILQ_EMPTY(&ncp->nc_list))
1566 if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
1569 struct pcpu_ncache *pn;
1571 pn = &pcpu_ncache[ncp->nc_negcpu];
1573 atomic_add_long(&pn->vfscache_negs, -1);
1574 spin_lock(&pn->neg_spin);
1575 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
1577 spin_unlock(&pn->neg_spin);
1579 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
1584 * The cache_nresolve() code calls this function to automatically
1585 * set a resolved cache element to unresolved if it has timed out
1586 * or if it is a negative cache hit and the mount point namecache_gen
1590 _cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp)
1593 * Try to zap entries that have timed out. We have
1594 * to be careful here because locked leafs may depend
1595 * on the vnode remaining intact in a parent, so only
1596 * do this under very specific conditions.
1598 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
1599 TAILQ_EMPTY(&ncp->nc_list)) {
1604 * If a resolved negative cache hit is invalid due to
1605 * the mount's namecache generation being bumped, zap it.
1607 if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
1612 * Otherwise we are good
1617 static __inline void
1618 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
1621 * Already in an unresolved state, nothing to do.
1623 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1624 if (_cache_auto_unresolve_test(mp, ncp))
1625 _cache_setunresolved(ncp);
1633 cache_setunresolved(struct nchandle *nch)
1635 _cache_setunresolved(nch->ncp);
1639 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
1640 * looking for matches. This flag tells the lookup code when it must
1641 * check for a mount linkage and also prevents the directories in question
1642 * from being deleted or renamed.
1646 cache_clrmountpt_callback(struct mount *mp, void *data)
1648 struct nchandle *nch = data;
1650 if (mp->mnt_ncmounton.ncp == nch->ncp)
1652 if (mp->mnt_ncmountpt.ncp == nch->ncp)
1658 * Clear NCF_ISMOUNTPT on nch->ncp if it is no longer associated
1659 * with a mount point.
1662 cache_clrmountpt(struct nchandle *nch)
1666 count = mountlist_scan(cache_clrmountpt_callback, nch,
1667 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1669 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1673 * Invalidate portions of the namecache topology given a starting entry.
1674 * The passed ncp is set to an unresolved state and:
1676 * The passed ncp must be referenced and locked. The routine may unlock
1677 * and relock ncp several times, and will recheck the children and loop
1678 * to catch races. When done the passed ncp will be returned with the
1679 * reference and lock intact.
1681 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
1682 * that the physical underlying nodes have been
1683 * destroyed... as in deleted. For example, when
1684 * a directory is removed. This will cause record
1685 * lookups on the name to no longer be able to find
1686 * the record and tells the resolver to return failure
1687 * rather than trying to resolve through the parent.
1689 * The topology itself, including ncp->nc_name,
1692 * This only applies to the passed ncp, if CINV_CHILDREN
1693 * is specified the children are not flagged.
1695 * CINV_CHILDREN - Set all children (recursively) to an unresolved
1698 * Note that this will also have the side effect of
1699 * cleaning out any unreferenced nodes in the topology
1700 * from the leaves up as the recursion backs out.
1702 * Note that the topology for any referenced nodes remains intact, but
1703 * the nodes will be marked as having been destroyed and will be set
1704 * to an unresolved state.
1706 * It is possible for cache_inval() to race a cache_resolve(), meaning that
1707 * the namecache entry may not actually be invalidated on return if it was
1708 * revalidated while recursing down into its children. This code guarantees
1709 * that the node(s) will go through an invalidation cycle, but does not
1710 * guarantee that they will remain in an invalidated state.
1712 * Returns non-zero if a revalidation was detected during the invalidation
1713 * recursion, zero otherwise. Note that since only the original ncp is
1714 * locked the revalidation ultimately can only indicate that the original ncp
1715 * *MIGHT* not have been re-resolved.
1717 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1718 * have to avoid blowing out the kernel stack. We do this by saving the
1719 * deep namecache node and aborting the recursion, then re-recursing at that
1720 * node using a depth-first algorithm in order to allow multiple deep
1721 * recursions to chain through each other, then we restart the invalidation
1726 struct namecache *resume_ncp;
1730 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
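/*
 * Illustrative sketch (not part of the original source): removing a
 * directory typically destroys and invalidates the whole sub-topology
 * under the locked handle:
 *
 *	cache_lock(&nch);
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 *	cache_put(&nch);
 */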
1734 _cache_inval(struct namecache *ncp, int flags)
1736 struct cinvtrack track;
1737 struct namecache *ncp2;
1741 track.resume_ncp = NULL;
1744 r = _cache_inval_internal(ncp, flags, &track);
1745 if (track.resume_ncp == NULL)
1748 while ((ncp2 = track.resume_ncp) != NULL) {
1749 track.resume_ncp = NULL;
1751 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
1761 cache_inval(struct nchandle *nch, int flags)
1763 return(_cache_inval(nch->ncp, flags));
1767 * Helper for _cache_inval(). The passed ncp is refd and locked and
1768 * remains that way on return, but may be unlocked/relocked multiple
1769 * times by the routine.
1772 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
1774 struct namecache *nextkid;
1777 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
1779 _cache_setunresolved(ncp);
1780 if (flags & CINV_DESTROY) {
1781 ncp->nc_flag |= NCF_DESTROYED;
1782 ++ncp->nc_generation;
1784 while ((flags & CINV_CHILDREN) &&
1785 (nextkid = TAILQ_FIRST(&ncp->nc_list)) != NULL
1787 struct namecache *kid;
1791 _cache_hold(nextkid);
1792 if (++track->depth > MAX_RECURSION_DEPTH) {
1793 track->resume_ncp = ncp;
1797 while ((kid = nextkid) != NULL) {
1799 * Parent (ncp) must be locked for the iteration.
1802 if (kid->nc_parent != ncp) {
1804 kprintf("cache_inval_internal restartA %s\n",
1809 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
1810 _cache_hold(nextkid);
1813 * Parent unlocked for this section to avoid
1817 if (track->resume_ncp) {
1822 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
1823 TAILQ_FIRST(&kid->nc_list)
1826 if (kid->nc_parent != ncp) {
1827 kprintf("cache_inval_internal "
1837 rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
1844 _cache_drop(nextkid);
1851 * Someone could have gotten in there while ncp was unlocked,
1854 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1860 * Invalidate a vnode's namecache associations. To avoid races against
1861 * the resolver we do not invalidate a node which we previously invalidated
1862 * but which was then re-resolved while we were in the invalidation loop.
1864 * Returns non-zero if any namecache entries remain after the invalidation
1867 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1868 * be ripped out of the topology while held, the vnode's v_namecache
1869 * list has no such restriction. NCP's can be ripped out of the list
1870 * at virtually any time if not locked, even if held.
1872 * In addition, the v_namecache list itself must be locked via
1873 * the vnode's spinlock.
1876 cache_inval_vp(struct vnode *vp, int flags)
1878 struct namecache *ncp;
1879 struct namecache *next;
1882 spin_lock(&vp->v_spin);
1883 ncp = TAILQ_FIRST(&vp->v_namecache);
1887 /* loop entered with ncp held and vp spin-locked */
1888 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1890 spin_unlock(&vp->v_spin);
1892 if (ncp->nc_vp != vp) {
1893 kprintf("Warning: cache_inval_vp: race-A detected on "
1894 "%s\n", ncp->nc_name);
1900 _cache_inval(ncp, flags);
1901 _cache_put(ncp); /* also releases reference */
1903 spin_lock(&vp->v_spin);
1904 if (ncp && ncp->nc_vp != vp) {
1905 spin_unlock(&vp->v_spin);
1906 kprintf("Warning: cache_inval_vp: race-B detected on "
1907 "%s\n", ncp->nc_name);
1912 spin_unlock(&vp->v_spin);
1913 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1917 * This routine is used instead of the normal cache_inval_vp() when we
1918 * are trying to recycle otherwise good vnodes.
1920 * Return 0 on success, non-zero if not all namecache records could be
1921 * disassociated from the vnode (for various reasons).
1924 cache_inval_vp_nonblock(struct vnode *vp)
1926 struct namecache *ncp;
1927 struct namecache *next;
1929 spin_lock(&vp->v_spin);
1930 ncp = TAILQ_FIRST(&vp->v_namecache);
1934 /* loop entered with ncp held */
1935 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1937 spin_unlock(&vp->v_spin);
1938 if (_cache_lock_nonblock(ncp)) {
1944 if (ncp->nc_vp != vp) {
1945 kprintf("Warning: cache_inval_vp: race-A detected on "
1946 "%s\n", ncp->nc_name);
1952 _cache_inval(ncp, 0);
1953 _cache_put(ncp); /* also releases reference */
1955 spin_lock(&vp->v_spin);
1956 if (ncp && ncp->nc_vp != vp) {
1957 spin_unlock(&vp->v_spin);
1958 kprintf("Warning: cache_inval_vp: race-B detected on "
1959 "%s\n", ncp->nc_name);
1964 spin_unlock(&vp->v_spin);
1966 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1970 * Clears the universal directory search 'ok' flag. This flag allows
1971 * nlookup() to bypass normal vnode checks. This flag is a cached flag
1972 * so clearing it simply forces revalidation.
1975 cache_inval_wxok(struct vnode *vp)
1977 struct namecache *ncp;
1979 spin_lock(&vp->v_spin);
1980 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1981 if (ncp->nc_flag & NCF_WXOK)
1982 atomic_clear_short(&ncp->nc_flag, NCF_WXOK);
1984 spin_unlock(&vp->v_spin);
1988 * The source ncp has been renamed to the target ncp. Both fncp and tncp
1989 * must be locked. The target ncp is destroyed (as a normal rename-over
1990 * would destroy the target file or directory).
1992 * Because there may be references to the source ncp we cannot copy its
1993 * contents to the target. Instead the source ncp is relinked as the target
1994 * and the target ncp is removed from the namecache topology.
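/*
 * Illustrative sketch (not part of the original source): a filesystem's
 * nrename VOP calls this once the backing objects have been renamed,
 * with both nchandles locked by the caller (the a_fnch/a_tnch field
 * names are assumed from the standard nrename argument structure):
 *
 *	error = (do the filesystem-level rename);
 *	if (error == 0)
 *		cache_rename(ap->a_fnch, ap->a_tnch);
 */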
1997 cache_rename(struct nchandle *fnch, struct nchandle *tnch)
1999 struct namecache *fncp = fnch->ncp;
2000 struct namecache *tncp = tnch->ncp;
2001 struct namecache *tncp_par;
2002 struct nchash_head *nchpp;
2007 ++fncp->nc_generation;
2008 ++tncp->nc_generation;
2009 if (tncp->nc_nlen) {
2010 nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK);
2011 bcopy(tncp->nc_name, nname, tncp->nc_nlen);
2012 nname[tncp->nc_nlen] = 0;
2018 * Rename fncp (unlink)
2020 _cache_unlink_parent(fncp);
2021 oname = fncp->nc_name;
2022 fncp->nc_name = nname;
2023 fncp->nc_nlen = tncp->nc_nlen;
2025 kfree(oname, M_VFSCACHE);
2027 tncp_par = tncp->nc_parent;
2028 _cache_hold(tncp_par);
2029 _cache_lock(tncp_par);
2032 * Rename fncp (relink)
2034 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
2035 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
2036 nchpp = NCHHASH(hash);
2038 spin_lock(&nchpp->spin);
2039 _cache_link_parent(fncp, tncp_par, nchpp);
2040 spin_unlock(&nchpp->spin);
2042 _cache_put(tncp_par);
2045 * Get rid of the overwritten tncp (unlink)
2047 _cache_unlink(tncp);
2051 * Perform actions consistent with unlinking a file. The passed-in ncp
2054 * The ncp is marked DESTROYED so it no longer shows up in searches,
2055 * and will be physically deleted when the vnode goes away.
2057 * If the related vnode has no refs then we cycle it through vget()/vput()
2058 * to trigger a deactivation (assuming we do not race a new ref),
2059 * allowing the VFS to trivially detect and recycle the deleted vnode
2060 * via VOP_INACTIVE().
2062 * NOTE: _cache_rename() will automatically call _cache_unlink() on the
2066 cache_unlink(struct nchandle *nch)
2068 _cache_unlink(nch->ncp);
2072 _cache_unlink(struct namecache *ncp)
2077 * Causes lookups to fail and allows another ncp with the same
2078 * name to be created under ncp->nc_parent.
2080 ncp->nc_flag |= NCF_DESTROYED;
2081 ++ncp->nc_generation;
2084 * Attempt to trigger a deactivation. Set VREF_FINALIZE to
2085 * force action on the 1->0 transition.
2087 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
2088 (vp = ncp->nc_vp) != NULL) {
2089 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
2090 if (VREFCNT(vp) <= 0) {
2091 if (vget(vp, LK_SHARED) == 0)
2098 * Return non-zero if the nch might be associated with an open and/or mmap()'d
2099 * file. The easy solution is to just return non-zero if the vnode has refs.
2100 * Used to interlock hammer2 reclaims (VREF_FINALIZE should already be set to
2101 * force the reclaim).
2104 cache_isopen(struct nchandle *nch)
2107 struct namecache *ncp = nch->ncp;
2109 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
2110 (vp = ncp->nc_vp) != NULL &&
2119 * vget the vnode associated with the namecache entry. Resolve the namecache
2120 * entry if necessary. The passed ncp must be referenced and locked. If
2121 * the ncp is resolved it might be locked shared.
2123 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
2124 * (depending on the passed lk_type) will be returned in *vpp with an error
2125 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
2126 * most typical error is ENOENT, meaning that the ncp represents a negative
2127 * cache hit and there is no vnode to retrieve, but other errors can occur
2130 * The vget() can race a reclaim. If this occurs we re-resolve the
2133 * There are numerous places in the kernel where vget() is called on a
2134 * vnode while one or more of its namecache entries is locked. Releasing
2135 * a vnode never deadlocks against locked namecache entries (the vnode
2136 * will not get recycled while referenced ncp's exist). This means we
2137 * can safely acquire the vnode. In fact, we MUST NOT release the ncp
2138 * lock when acquiring the vp lock or we might cause a deadlock.
2140 * NOTE: The passed-in ncp must be locked exclusively if it is initially
2141 * unresolved. If a reclaim race occurs the passed-in ncp will be
2142 * relocked exclusively before being re-resolved.
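/*
 * Illustrative sketch (not part of the original source): the usual way
 * to go from a referenced nchandle to a usable, locked vnode:
 *
 *	struct vnode *vp;
 *
 *	cache_lock(&nch);
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	cache_put(&nch);
 *	if (error == 0) {
 *		(use the referenced, locked vp)
 *		vput(vp);
 *	}
 */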
2145 cache_vget(struct nchandle *nch, struct ucred *cred,
2146 int lk_type, struct vnode **vpp)
2148 struct namecache *ncp;
2155 if (ncp->nc_flag & NCF_UNRESOLVED)
2156 error = cache_resolve(nch, cred);
2160 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
2161 error = vget(vp, lk_type);
2166 * The ncp may have been locked shared, we must relock
2167 * it exclusively before we can set it to unresolved.
2169 if (error == ENOENT) {
2170 kprintf("Warning: vnode reclaim race detected "
2171 "in cache_vget on %p (%s)\n",
2175 _cache_setunresolved(ncp);
2180 * Not a reclaim race, some other error.
2182 KKASSERT(ncp->nc_vp == vp);
2185 KKASSERT(ncp->nc_vp == vp);
2186 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
2189 if (error == 0 && vp == NULL)
2196 * Similar to cache_vget() but only acquires a ref on the vnode.
2198 * NOTE: The passed-in ncp must be locked exclusively if it is initially
2199 * unresolved. If a reclaim race occurs the passed-in ncp will be
2200 * relocked exclusively before being re-resolved.
2203 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
2205 struct namecache *ncp;
2212 if (ncp->nc_flag & NCF_UNRESOLVED)
2213 error = cache_resolve(nch, cred);
2217 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
2218 error = vget(vp, LK_SHARED);
2223 if (error == ENOENT) {
2224 kprintf("Warning: vnode reclaim race detected "
2225 "in cache_vget on %p (%s)\n",
2229 _cache_setunresolved(ncp);
2234 * Not a reclaim race, some other error.
2236 KKASSERT(ncp->nc_vp == vp);
2239 KKASSERT(ncp->nc_vp == vp);
2240 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
2241 /* caller does not want a lock */
2245 if (error == 0 && vp == NULL)
2252 * Return a referenced vnode representing the parent directory of
2255 * Because the caller has locked the ncp it should not be possible for
2256 * the parent ncp to go away. However, the parent can unresolve its
2257 * dvp at any time so we must be able to acquire a lock on the parent
2258 * to safely access nc_vp.
2260 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
2261 * so use vhold()/vdrop() while holding the lock to prevent dvp from
2262 * getting destroyed.
2264 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a
2265 * lock on the ncp in question..
2267 static struct vnode *
2268 cache_dvpref(struct namecache *ncp)
2270 struct namecache *par;
2274 if ((par = ncp->nc_parent) != NULL) {
2277 if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
2278 if ((dvp = par->nc_vp) != NULL)
2283 if (vget(dvp, LK_SHARED) == 0) {
2286 /* return refd, unlocked dvp */
2298 * Convert a directory vnode to a namecache record without any other
2299 * knowledge of the topology. This ONLY works with directory vnodes and
2300 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
2301 * returned ncp (if not NULL) will be held and unlocked.
2303 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
2304 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
2305 * for dvp. This will fail only if the directory has been deleted out from
2308 * Callers must always check for a NULL return no matter the value of 'makeit'.
2310 * To avoid underflowing the kernel stack each recursive call increments
2311 * the makeit variable.
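/*
 * Illustrative sketch (not part of the original source): the NFS server
 * uses this to reconnect a directory vnode derived from a file handle
 * back into the namecache topology.  dvp is ref'd but unlocked:
 *
 *	struct nchandle nch;
 *
 *	cache_fromdvp(dvp, cred, 1, &nch);
 *	if (nch.ncp == NULL)
 *		(fail; the topology could not be reconstructed)
 *	else
 *		(use nch, then cache_drop(&nch))
 */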
2314 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2315 struct vnode *dvp, char *fakename);
2316 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
2317 struct vnode **saved_dvp);
2320 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
2321 struct nchandle *nch)
2323 struct vnode *saved_dvp;
2329 nch->mount = dvp->v_mount;
2334 * Handle the makeit == 0 degenerate case
2337 spin_lock_shared(&dvp->v_spin);
2338 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2341 spin_unlock_shared(&dvp->v_spin);
2345 * Loop until resolution, inside code will break out on error.
2349 * Break out if we successfully acquire a working ncp.
2351 spin_lock_shared(&dvp->v_spin);
2352 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2355 spin_unlock_shared(&dvp->v_spin);
2358 spin_unlock_shared(&dvp->v_spin);
2361 * If dvp is the root of its filesystem it should already
2362 * have a namecache pointer associated with it as a side
2363 * effect of the mount, but it may have been disassociated.
2365 if (dvp->v_flag & VROOT) {
2366 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
2367 error = cache_resolve_mp(nch->mount);
2368 _cache_put(nch->ncp);
2370 kprintf("cache_fromdvp: resolve root of mount %p error %d",
2371 dvp->v_mount, error);
2375 kprintf(" failed\n");
2380 kprintf(" succeeded\n");
2385 * If we have recursed too deeply, resort to an O(n^2)
2386 * algorithm to resolve the namecache topology. The
2387 * resolved pvp is left referenced in saved_dvp to
2388 * prevent the tree from being destroyed while we loop.
2391 error = cache_fromdvp_try(dvp, cred, &saved_dvp);
2393 kprintf("lookupdotdot(longpath) failed %d "
2394 "dvp %p\n", error, dvp);
2402 * Get the parent directory and resolve its ncp.
2405 kfree(fakename, M_TEMP);
2408 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2411 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
2417 * Reuse makeit as a recursion depth counter. On success
2418 * nch will be fully referenced.
2420 cache_fromdvp(pvp, cred, makeit + 1, nch);
2422 if (nch->ncp == NULL)
2426 * Do an inefficient scan of pvp (embodied by ncp) to look
2427 * for dvp. This will create a namecache record for dvp on
2428 * success. We loop up to recheck on success.
2430 * ncp and dvp are both held but not locked.
2432 error = cache_inefficient_scan(nch, cred, dvp, fakename);
2434 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
2435 pvp, nch->ncp->nc_name, dvp);
2437 /* nch was NULLed out, reload mount */
2438 nch->mount = dvp->v_mount;
2442 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
2443 pvp, nch->ncp->nc_name);
2446 /* nch was NULLed out, reload mount */
2447 nch->mount = dvp->v_mount;
2451 * If nch->ncp is non-NULL it will have been held already.
2454 kfree(fakename, M_TEMP);
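/*
 * Illustrative sketch (user-space, not part of this file): the makeit
 * depth-counter pattern cache_fromdvp() uses above.  Each recursion level
 * passes depth + 1; past an assumed limit the routine falls back to a plain
 * iterative walk so a very deep chain cannot exhaust the stack.  'struct
 * node', MAX_RECURSION and the function names are made up for illustration.
 */
#include <stddef.h>

struct node {
	struct node *parent;
};

#define MAX_RECURSION	32

static size_t
chain_depth_iterative(const struct node *n)
{
	size_t depth = 0;

	while (n != NULL) {
		++depth;
		n = n->parent;
	}
	return (depth);
}

static size_t
chain_depth(const struct node *n, int level)
{
	if (n == NULL)
		return (0);
	if (level >= MAX_RECURSION)
		return (chain_depth_iterative(n));	/* bounded stack use */
	return (1 + chain_depth(n->parent, level + 1));
}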
2463 * Go up the chain of parent directories until we find something
2464 * we can resolve into the namecache. This is very inefficient.
2468 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
2469 struct vnode **saved_dvp)
2471 struct nchandle nch;
2474 static time_t last_fromdvp_report;
2478 * Loop getting the parent directory vnode until we get something we
2479 * can resolve in the namecache.
2482 nch.mount = dvp->v_mount;
2488 kfree(fakename, M_TEMP);
2491 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2498 spin_lock_shared(&pvp->v_spin);
2499 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
2500 _cache_hold(nch.ncp);
2501 spin_unlock_shared(&pvp->v_spin);
2505 spin_unlock_shared(&pvp->v_spin);
2506 if (pvp->v_flag & VROOT) {
2507 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
2508 error = cache_resolve_mp(nch.mount);
2509 _cache_unlock(nch.ncp);
2512 _cache_drop(nch.ncp);
2522 if (last_fromdvp_report != time_uptime) {
2523 last_fromdvp_report = time_uptime;
2524 kprintf("Warning: extremely inefficient path "
2525 "resolution on %s\n",
2528 error = cache_inefficient_scan(&nch, cred, dvp, fakename);
2531 * Hopefully dvp now has a namecache record associated with
2532 * it. Leave it referenced to prevent the kernel from
2533 * recycling the vnode. Otherwise extremely long directory
2534 * paths could result in endless recycling.
2539 _cache_drop(nch.ncp);
2542 kfree(fakename, M_TEMP);
2547 * Do an inefficient scan of the directory represented by ncp looking for
2548 * the directory vnode dvp. ncp must be held but not locked on entry and
2549 * will be held on return. dvp must be refd but not locked on entry and
2550 * will remain refd on return.
2552 * Why do this at all? Well, due to its stateless nature the NFS server
2553 * converts file handles directly to vnodes without necessarily going through
2554 * the namecache ops that would otherwise create the namecache topology
2555 * leading to the vnode. We could either (1) Change the namecache algorithms
2556 * to allow disconnected namecache records that are re-merged opportunistically,
2557 * or (2) Make the NFS server backtrack and scan to recover a connected
2558 * namecache topology in order to then be able to issue new API lookups.
2560 * It turns out that (1) is a huge mess. It takes a nice clean set of
2561 * namecache algorithms and introduces a lot of complication in every subsystem
2562 * that calls into the namecache to deal with the re-merge case, especially
2563 * since we are using the namecache to placehold negative lookups and the
2564 * vnode might not be immediately assigned. (2) is certainly far less
2565 * efficient than (1), but since we are only talking about directories here
2566 * (which are likely to remain cached), the case does not actually run all
2567 * that often and has the supreme advantage of not polluting the namecache
2570 * If a fakename is supplied just construct a namecache entry using the
2574 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2575 struct vnode *dvp, char *fakename)
2577 struct nlcomponent nlc;
2578 struct nchandle rncp;
2590 vat.va_blocksize = 0;
2591 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
2594 error = cache_vref(nch, cred, &pvp);
2599 kprintf("inefficient_scan of (%p,%s): directory iosize %ld "
2600 "vattr fileid = %lld\n",
2601 nch->ncp, nch->ncp->nc_name,
2603 (long long)vat.va_fileid);
2607 * Use the supplied fakename if not NULL. Fake names are typically
2608 * not in the actual filesystem hierarchy. This is used by HAMMER
2609 * to glue @@timestamp recursions together.
2612 nlc.nlc_nameptr = fakename;
2613 nlc.nlc_namelen = strlen(fakename);
2614 rncp = cache_nlookup(nch, &nlc);
2618 if ((blksize = vat.va_blocksize) == 0)
2619 blksize = DEV_BSIZE;
2620 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
2626 iov.iov_base = rbuf;
2627 iov.iov_len = blksize;
2630 uio.uio_resid = blksize;
2631 uio.uio_segflg = UIO_SYSSPACE;
2632 uio.uio_rw = UIO_READ;
2633 uio.uio_td = curthread;
2635 if (ncvp_debug >= 2)
2636 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
2637 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
2639 den = (struct dirent *)rbuf;
2640 bytes = blksize - uio.uio_resid;
2643 if (ncvp_debug >= 2) {
2644 kprintf("cache_inefficient_scan: %*.*s\n",
2645 den->d_namlen, den->d_namlen,
2648 if (den->d_type != DT_WHT &&
2649 den->d_ino == vat.va_fileid) {
2651 kprintf("cache_inefficient_scan: "
2652 "MATCHED inode %lld path %s/%*.*s\n",
2653 (long long)vat.va_fileid,
2655 den->d_namlen, den->d_namlen,
2658 nlc.nlc_nameptr = den->d_name;
2659 nlc.nlc_namelen = den->d_namlen;
2660 rncp = cache_nlookup(nch, &nlc);
2661 KKASSERT(rncp.ncp != NULL);
2664 bytes -= _DIRENT_DIRSIZ(den);
2665 den = _DIRENT_NEXT(den);
2667 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
2670 kfree(rbuf, M_TEMP);
2674 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
2675 _cache_setvp(rncp.mount, rncp.ncp, dvp);
2676 if (ncvp_debug >= 2) {
2677 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
2678 nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
2681 if (ncvp_debug >= 2) {
2682 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
2683 nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
2687 if (rncp.ncp->nc_vp == NULL)
2688 error = rncp.ncp->nc_error;
2690 * Release rncp after a successful nlookup. rncp was fully
2695 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
2696 dvp, nch->ncp->nc_name);
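/*
 * Illustrative sketch (user-space, not part of this file): the same idea as
 * cache_inefficient_scan() expressed with POSIX calls -- scan a directory
 * and recover the name of the entry whose inode number matches a known
 * target.  The real code also skips whiteout entries and reads the directory
 * in filesystem-sized blocks; find_name_by_ino() and its arguments are made
 * up for illustration.
 */
#include <sys/types.h>
#include <dirent.h>
#include <string.h>

static int
find_name_by_ino(const char *dirpath, ino_t target, char *name, size_t namesz)
{
	DIR *dp;
	struct dirent *den;
	int found = 0;

	if ((dp = opendir(dirpath)) == NULL)
		return (-1);
	while ((den = readdir(dp)) != NULL) {
		if (den->d_ino == target) {
			strlcpy(name, den->d_name, namesz);
			found = 1;
			break;
		}
	}
	closedir(dp);
	return (found ? 0 : -1);
}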
2703 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
2704 * state, which disassociates it from its vnode or pcpu_ncache[n].neg_list.
2706 * Then, if there are no additional references to the ncp and no children,
2707 * the ncp is removed from the topology and destroyed.
2709 * References and/or children may exist if the ncp is in the middle of the
2710 * topology, preventing the ncp from being destroyed.
2712 * This function must be called with the ncp held and locked and will unlock
2713 * and drop it during zapping.
2715 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
2716 * This case can occur in the cache_drop() path.
2718 * This function may return a held (but NOT locked) parent node which the
2719 * caller must drop. We do this so _cache_drop() can loop, to avoid
2720 * blowing out the kernel stack.
2722 * WARNING! For MPSAFE operation this routine must acquire up to three
2723 * spin locks to be able to safely test nc_refs. Lock order is
2726 * hash spinlock if on hash list
2727 * parent spinlock if child of parent
2728 * (the ncp is unresolved so there is no vnode association)
2730 static struct namecache *
2731 cache_zap(struct namecache *ncp, int nonblock)
2733 struct namecache *par;
2734 struct vnode *dropvp;
2735 struct nchash_head *nchpp;
2739 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2741 _cache_setunresolved(ncp);
2744 * Try to scrap the entry and possibly tail-recurse on its parent.
2745 * We only scrap unref'd (other than our ref) unresolved entries,
2746 * we do not scrap 'live' entries.
2748 * Note that once the spinlocks are acquired if nc_refs == 1 no
2749 * other references are possible. If it isn't, however, we have
2750 * to decrement but also be sure to avoid a 1->0 transition.
2752 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2753 KKASSERT(ncp->nc_refs > 0);
2756 * Acquire locks. Note that the parent can't go away while we hold
2760 if ((par = ncp->nc_parent) != NULL) {
2763 if (_cache_lock_nonblock(par) == 0)
2765 refs = ncp->nc_refs;
2766 ncp->nc_flag |= NCF_DEFEREDZAP;
2768 &pcpu_ncache[mycpu->gd_cpuid].numdefered,
2770 if (atomic_cmpset_int(&ncp->nc_refs,
2782 nchpp = ncp->nc_head;
2783 spin_lock(&nchpp->spin);
2787 * At this point if we find refs == 1 it should not be possible for
2788 * anyone else to have access to the ncp. We are holding the only
2789 * possible access point left (nchpp) spin-locked.
2791 * If someone other than us has a ref or we have children
2792 * we cannot zap the entry. The 1->0 transition and any
2793 * further list operation is protected by the spinlocks
2794 * we have acquired but other transitions are not.
2797 refs = ncp->nc_refs;
2799 if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
2801 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
2803 spin_unlock(&nchpp->spin);
2813 * We are the only ref and with the spinlocks held no further
2814 * refs can be acquired by others.
2816 * Remove us from the hash list and parent list. We have to
2817 * drop a ref on the parent's vp if the parent's list becomes
2822 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
2824 KKASSERT(nchpp == ncp->nc_head);
2825 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash);
2826 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
2827 atomic_add_long(&pn->vfscache_count, -1);
2828 if (TAILQ_EMPTY(&ncp->nc_list))
2829 atomic_add_long(&pn->vfscache_leafs, -1);
2831 if (TAILQ_EMPTY(&par->nc_list)) {
2832 atomic_add_long(&pn->vfscache_leafs, 1);
2834 dropvp = par->nc_vp;
2836 ncp->nc_head = NULL;
2837 ncp->nc_parent = NULL;
2838 spin_unlock(&nchpp->spin);
2841 KKASSERT(ncp->nc_head == NULL);
2845 * ncp should not have picked up any refs. Physically
2848 if (ncp->nc_refs != 1) {
2849 int save_refs = ncp->nc_refs;
2851 panic("cache_zap: %p bad refs %d (%d)\n",
2852 ncp, save_refs, atomic_fetchadd_int(&ncp->nc_refs, 0));
2854 KKASSERT(ncp->nc_refs == 1);
2855 /* _cache_unlock(ncp) not required */
2856 ncp->nc_refs = -1; /* safety */
2858 kfree(ncp->nc_name, M_VFSCACHE);
2859 kfree(ncp, M_VFSCACHE);
2862 * Delayed drop (we had to release our spinlocks)
2864 * The refed parent (if not NULL) must be dropped. The
2865 * caller is responsible for looping.
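/*
 * Illustrative sketch (user-space C11, not part of this file): the
 * compare-and-set pattern cache_zap() uses above so that it never performs
 * the final 1->0 reference transition blindly.  If the count is 1 the caller
 * keeps the last reference (and may destroy the object); otherwise the count
 * is simply decremented.  Names are made up for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>

/* Returns true when the caller holds the last remaining reference. */
static bool
drop_ref_keep_last(atomic_int *refs)
{
	int old;

	for (;;) {
		old = atomic_load(refs);
		if (old == 1)
			return (true);	/* would be 1->0; let the caller destroy */
		if (atomic_compare_exchange_weak(refs, &old, old - 1))
			return (false);	/* ordinary decrement */
	}
}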
2873 * Clean up dangling negative cache and defered-drop entries in the
2876 * This routine is called in the critical path and also called from
2877 * vnlru(). When called from vnlru we use a lower limit to try to
2878 * deal with the negative cache before the critical path has to start
2881 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;
2883 static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
2884 static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
2887 cache_hysteresis(int critpath)
2890 long neglimit = maxvnodes / ncnegfactor;
2891 long xnumcache = vfscache_leafs;
2894 neglimit = neglimit * 8 / 10;
2897 * Don't cache too many negative hits. We use hysteresis to reduce
2898 * the impact on the critical path.
2900 switch(neg_cache_hysteresis_state[critpath]) {
2902 if (vfscache_negs > MINNEG && vfscache_negs > neglimit) {
2904 _cache_cleanneg(ncnegflush);
2906 _cache_cleanneg(ncnegflush +
2907 vfscache_negs - neglimit);
2908 neg_cache_hysteresis_state[critpath] = CHI_HIGH;
2912 if (vfscache_negs > MINNEG * 9 / 10 &&
2913 vfscache_negs * 9 / 10 > neglimit
2916 _cache_cleanneg(ncnegflush);
2918 _cache_cleanneg(ncnegflush +
2919 vfscache_negs * 9 / 10 -
2922 neg_cache_hysteresis_state[critpath] = CHI_LOW;
2928 * Don't cache too many positive hits. We use hysteresis to reduce
2929 * the impact on the critical path.
2931 * Excessive positive hits can accumulate due to large numbers of
2932 * hardlinks (the vnode cache will not prevent hl ncps from growing
2935 if ((poslimit = ncposlimit) == 0)
2936 poslimit = maxvnodes * 2;
2938 poslimit = poslimit * 8 / 10;
2940 switch(pos_cache_hysteresis_state[critpath]) {
2942 if (xnumcache > poslimit && xnumcache > MINPOS) {
2944 _cache_cleanpos(ncposflush);
2946 _cache_cleanpos(ncposflush +
2947 xnumcache - poslimit);
2948 pos_cache_hysteresis_state[critpath] = CHI_HIGH;
2952 if (xnumcache > poslimit * 5 / 6 && xnumcache > MINPOS) {
2954 _cache_cleanpos(ncposflush);
2956 _cache_cleanpos(ncposflush +
2957 xnumcache - poslimit * 5 / 6);
2959 pos_cache_hysteresis_state[critpath] = CHI_LOW;
2965 * Clean out dangling defered-zap ncps which could not be cleanly
2966 * dropped if too many build up. Note that numdefered is
2967 * heuristic.  Make sure we are real-time for the current cpu,
2968 * plus the global rollup.
2970 if (pcpu_ncache[mycpu->gd_cpuid].numdefered + numdefered > neglimit) {
2971 _cache_cleandefered();
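/*
 * Illustrative sketch (user-space, not part of this file): the two-state
 * hysteresis used by cache_hysteresis() above.  Cleaning starts when the
 * count crosses the limit and keeps being requested until the count falls
 * well below it again, so the critical path is not flushing on every single
 * crossing.  The 9/10 re-arm point mirrors the negative-cache case; the
 * names are made up for illustration.
 */
typedef enum { HS_LOW, HS_HIGH } hs_state_t;

static int
hysteresis_should_clean(hs_state_t *state, long count, long limit)
{
	switch (*state) {
	case HS_LOW:
		if (count > limit) {
			*state = HS_HIGH;
			return (1);
		}
		return (0);
	case HS_HIGH:
		if (count > limit * 9 / 10)
			return (1);
		*state = HS_LOW;
		return (0);
	}
	return (0);
}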
2976 * NEW NAMECACHE LOOKUP API
2978 * Lookup an entry in the namecache. The passed par_nch must be referenced
2979 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
2980 * is ALWAYS returned, even if the supplied component is illegal.
2982 * The resulting namecache entry should be returned to the system with
2983 * cache_put() or cache_unlock() + cache_drop().
2985 * namecache locks are recursive but care must be taken to avoid lock order
2986 * reversals (hence why the passed par_nch must be unlocked). Locking
2987 * rules are to order for parent traversals, not for child traversals.
2989 * Nobody else will be able to manipulate the associated namespace (e.g.
2990 * create, delete, rename, rename-target) until the caller unlocks the
2993 * The returned entry will be in one of three states: positive hit (non-null
2994 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
2995 * Unresolved entries must be resolved through the filesystem to associate the
2996 * vnode and/or determine whether a positive or negative hit has occurred.
2998 * It is not necessary to lock a directory in order to lock namespace under
2999 * that directory. In fact, it is explicitly not allowed to do that. A
3000 * directory is typically only locked when being created, renamed, or
3003 * The directory (par) may be unresolved, in which case any returned child
3004 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
3005 * the filesystem lookup requires a resolved directory vnode the caller is
3006 * responsible for resolving the namecache chain top-down. This API
3007 * specifically allows whole chains to be created in an unresolved state.
3010 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
3012 struct nchandle nch;
3013 struct namecache *ncp;
3014 struct namecache *new_ncp;
3015 struct namecache *rep_ncp; /* reuse a destroyed ncp */
3016 struct nchash_head *nchpp;
3023 mp = par_nch->mount;
3027 * This is a good time to call it, no ncp's are locked by
3030 cache_hysteresis(1);
3033 * Try to locate an existing entry
3035 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3036 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3038 nchpp = NCHHASH(hash);
3042 spin_lock(&nchpp->spin);
3044 spin_lock_shared(&nchpp->spin);
3046 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3048 * Break out if we find a matching entry. Note that
3049 * UNRESOLVED entries may match, but DESTROYED entries
3052 * We may be able to reuse DESTROYED entries that we come
3053 * across, even if the name does not match, as long as
3054 * nc_nlen is correct.
3056 if (ncp->nc_parent == par_nch->ncp &&
3057 ncp->nc_nlen == nlc->nlc_namelen) {
3058 if (ncp->nc_flag & NCF_DESTROYED) {
3059 if (ncp->nc_refs == 0 && rep_ncp == NULL)
3063 if (bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen))
3067 spin_unlock(&nchpp->spin);
3069 spin_unlock_shared(&nchpp->spin);
3071 _cache_unlock(par_nch->ncp);
3074 if (_cache_lock_special(ncp) == 0) {
3076 * Successfully locked but we must re-test
3077 * conditions that might have changed since
3078 * we did not have the lock before.
3080 if (ncp->nc_parent != par_nch->ncp ||
3081 ncp->nc_nlen != nlc->nlc_namelen ||
3082 bcmp(ncp->nc_name, nlc->nlc_nameptr,
3084 (ncp->nc_flag & NCF_DESTROYED)) {
3088 _cache_auto_unresolve(mp, ncp);
3090 _cache_free(new_ncp);
3093 _cache_get(ncp); /* cycle the lock to block */
3101 * We failed to locate the entry, try to resurrect a destroyed
3102 * entry that we did find that is already correctly linked into
3103 * nchpp and the parent. We must re-test conditions after
3104 * successfully locking rep_ncp.
3106 * This case can occur under heavy loads due to not being able
3107 * to safely lock the parent in cache_zap(). Nominally a repeated
3108 * create/unlink load, but only the namelen needs to match.
3110 if (rep_ncp && new_ncp == NULL) {
3111 if (_cache_lock_nonblock(rep_ncp) == 0) {
3112 _cache_hold(rep_ncp);
3113 if (rep_ncp->nc_parent == par_nch->ncp &&
3114 rep_ncp->nc_nlen == nlc->nlc_namelen &&
3115 (rep_ncp->nc_flag & NCF_DESTROYED)) {
3117 * Update nc_name; we are reusing this ncp as if it were new.
3120 bcopy(nlc->nlc_nameptr, ncp->nc_name,
3122 spin_unlock_shared(&nchpp->spin);
3123 _cache_setunresolved(ncp);
3124 ncp->nc_flag = NCF_UNRESOLVED;
3125 ncp->nc_error = ENOTCONN;
3128 _cache_put(rep_ncp);
3133 * Otherwise create a new entry and add it to the cache. The parent
3134 * ncp must also be locked so we can link into it.
3136 * We have to relookup after possibly blocking in kmalloc or
3137 * when locking par_nch.
3139 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
3140 * mount case, in which case nc_name will be NULL.
3142 if (new_ncp == NULL) {
3143 spin_unlock_shared(&nchpp->spin);
3144 new_ncp = cache_alloc(nlc->nlc_namelen);
3145 if (nlc->nlc_namelen) {
3146 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
3148 new_ncp->nc_name[nlc->nlc_namelen] = 0;
3154 * NOTE! The spinlock is held exclusively here because new_ncp
3157 if (par_locked == 0) {
3158 spin_unlock(&nchpp->spin);
3159 _cache_lock(par_nch->ncp);
3165 * WARNING! We still hold the spinlock. We have to set the hash
3166 * table entry atomically.
3169 _cache_link_parent(ncp, par_nch->ncp, nchpp);
3170 spin_unlock(&nchpp->spin);
3171 _cache_unlock(par_nch->ncp);
3172 /* par_locked = 0 - not used */
3175 * stats and namecache size management
3177 if (ncp->nc_flag & NCF_UNRESOLVED)
3178 ++gd->gd_nchstats->ncs_miss;
3179 else if (ncp->nc_vp)
3180 ++gd->gd_nchstats->ncs_goodhits;
3182 ++gd->gd_nchstats->ncs_neghits;
3185 _cache_mntref(nch.mount);
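/*
 * Illustrative sketch (user-space, not part of this file): how the hash
 * chain is selected in cache_nlookup() above.  fnv_32_buf() is assumed to
 * follow the classic 32-bit FNV-1 recurrence (multiply by the FNV prime,
 * then XOR in the next byte).  The name is hashed first and the parent ncp
 * pointer is then folded in, so the same name under different parents lands
 * on different chains.  A power-of-two mask stands in for NCHHASH().
 */
#include <stdint.h>
#include <stddef.h>

#define FNV1_32_INIT	0x811c9dc5u
#define FNV_32_PRIME	0x01000193u

static uint32_t
fnv_32(const void *buf, size_t len, uint32_t hval)
{
	const unsigned char *s = buf;

	while (len--) {
		hval *= FNV_32_PRIME;
		hval ^= *s++;
	}
	return (hval);
}

static uint32_t
nchash_slot(const char *name, size_t namelen, const void *parent, uint32_t mask)
{
	uint32_t hash;

	hash = fnv_32(name, namelen, FNV1_32_INIT);
	hash = fnv_32(&parent, sizeof(parent), hash);
	return (hash & mask);
}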
3191 * Attempt to lookup a namecache entry and return with a shared namecache
3195 cache_nlookup_maybe_shared(struct nchandle *par_nch, struct nlcomponent *nlc,
3196 int excl, struct nchandle *res_nch)
3198 struct namecache *ncp;
3199 struct nchash_head *nchpp;
3205 * If exclusive requested or shared namecache locks are disabled,
3208 if (ncp_shared_lock_disable || excl)
3209 return(EWOULDBLOCK);
3212 mp = par_nch->mount;
3215 * This is a good time to call it, no ncp's are locked by
3218 cache_hysteresis(1);
3221 * Try to locate an existing entry
3223 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3224 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3225 nchpp = NCHHASH(hash);
3227 spin_lock_shared(&nchpp->spin);
3229 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3231 * Break out if we find a matching entry. Note that
3232 * UNRESOLVED entries may match, but DESTROYED entries
3235 if (ncp->nc_parent == par_nch->ncp &&
3236 ncp->nc_nlen == nlc->nlc_namelen &&
3237 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3238 (ncp->nc_flag & NCF_DESTROYED) == 0
3241 spin_unlock_shared(&nchpp->spin);
3242 if (_cache_lock_shared_special(ncp) == 0) {
3243 if (ncp->nc_parent == par_nch->ncp &&
3244 ncp->nc_nlen == nlc->nlc_namelen &&
3245 bcmp(ncp->nc_name, nlc->nlc_nameptr,
3246 ncp->nc_nlen) == 0 &&
3247 (ncp->nc_flag & NCF_DESTROYED) == 0 &&
3248 (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
3249 _cache_auto_unresolve_test(mp, ncp) == 0) {
3255 spin_lock_shared(&nchpp->spin);
3263 spin_unlock_shared(&nchpp->spin);
3264 return(EWOULDBLOCK);
3269 * Note that nc_error might be non-zero (e.g. ENOENT).
3272 res_nch->mount = mp;
3274 ++gd->gd_nchstats->ncs_goodhits;
3275 _cache_mntref(res_nch->mount);
3277 KKASSERT(ncp->nc_error != EWOULDBLOCK);
3278 return(ncp->nc_error);
3282 * This is a non-blocking version of cache_nlookup() used by
3283 * nfs_readdirplusrpc_uio(). It can fail for any reason and
3284 * will return nch.ncp == NULL in that case.
3287 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
3289 struct nchandle nch;
3290 struct namecache *ncp;
3291 struct namecache *new_ncp;
3292 struct nchash_head *nchpp;
3299 mp = par_nch->mount;
3303 * Try to locate an existing entry
3305 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3306 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3308 nchpp = NCHHASH(hash);
3310 spin_lock(&nchpp->spin);
3311 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3313 * Break out if we find a matching entry. Note that
3314 * UNRESOLVED entries may match, but DESTROYED entries
3317 if (ncp->nc_parent == par_nch->ncp &&
3318 ncp->nc_nlen == nlc->nlc_namelen &&
3319 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3320 (ncp->nc_flag & NCF_DESTROYED) == 0
3323 spin_unlock(&nchpp->spin);
3325 _cache_unlock(par_nch->ncp);
3328 if (_cache_lock_special(ncp) == 0) {
3329 if (ncp->nc_parent != par_nch->ncp ||
3330 ncp->nc_nlen != nlc->nlc_namelen ||
3331 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) ||
3332 (ncp->nc_flag & NCF_DESTROYED)) {
3333 kprintf("cache_lookup_nonblock: "
3334 "ncp-race %p %*.*s\n",
3343 _cache_auto_unresolve(mp, ncp);
3345 _cache_free(new_ncp);
3356 * We failed to locate an entry, create a new entry and add it to
3357 * the cache. The parent ncp must also be locked so we
3360 * We have to relookup after possibly blocking in kmalloc or
3361 * when locking par_nch.
3363 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
3364 * mount case, in which case nc_name will be NULL.
3366 if (new_ncp == NULL) {
3367 spin_unlock(&nchpp->spin);
3368 new_ncp = cache_alloc(nlc->nlc_namelen);
3369 if (nlc->nlc_namelen) {
3370 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
3372 new_ncp->nc_name[nlc->nlc_namelen] = 0;
3376 if (par_locked == 0) {
3377 spin_unlock(&nchpp->spin);
3378 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
3386 * WARNING! We still hold the spinlock. We have to set the hash
3387 * table entry atomically.
3390 _cache_link_parent(ncp, par_nch->ncp, nchpp);
3391 spin_unlock(&nchpp->spin);
3392 _cache_unlock(par_nch->ncp);
3393 /* par_locked = 0 - not used */
3396 * stats and namecache size management
3398 if (ncp->nc_flag & NCF_UNRESOLVED)
3399 ++gd->gd_nchstats->ncs_miss;
3400 else if (ncp->nc_vp)
3401 ++gd->gd_nchstats->ncs_goodhits;
3403 ++gd->gd_nchstats->ncs_neghits;
3406 _cache_mntref(nch.mount);
3411 _cache_free(new_ncp);
3420 * The namecache entry is marked as being used as a mount point.
3421 * Locate the mount if it is visible to the caller. The DragonFly
3422 * mount system allows arbitrary loops in the topology and disentangles
3423 * those loops by matching against (mp, ncp) rather than just (ncp).
3424 * This means any given ncp can dive any number of mounts, depending
3425 * on the relative mount (e.g. nullfs) the caller is at in the topology.
3427 * We use a very simple frontend cache to reduce SMP conflicts,
3428 * which we have to do because the mountlist scan needs an exclusive
3429 * lock around its ripout info list. Not to mention that there might
3430 * be a lot of mounts.
3432 struct findmount_info {
3433 struct mount *result;
3434 struct mount *nch_mount;
3435 struct namecache *nch_ncp;
3439 struct ncmount_cache *
3440 ncmount_cache_lookup(struct mount *mp, struct namecache *ncp)
3444 hash = (uintptr_t)mp + ((uintptr_t)mp >> 18);
3445 hash += (uintptr_t)ncp + ((uintptr_t)ncp >> 16);
3446 hash = (hash >> 1) % NCMOUNT_NUMCACHE;
3448 return (&ncmount_cache[hash]);
3453 cache_findmount_callback(struct mount *mp, void *data)
3455 struct findmount_info *info = data;
3458 * Check the mount's mounted-on point against the passed nch.
3460 if (mp->mnt_ncmounton.mount == info->nch_mount &&
3461 mp->mnt_ncmounton.ncp == info->nch_ncp
3471 cache_findmount(struct nchandle *nch)
3473 struct findmount_info info;
3474 struct ncmount_cache *ncc;
3480 if (ncmount_cache_enable == 0) {
3484 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3485 if (ncc->ncp == nch->ncp) {
3486 spin_lock_shared(&ncc->spin);
3487 if (ncc->isneg == 0 &&
3488 ncc->ncp == nch->ncp && (mp = ncc->mp) != NULL) {
3489 if (mp->mnt_ncmounton.mount == nch->mount &&
3490 mp->mnt_ncmounton.ncp == nch->ncp) {
3492 * Cache hit (positive)
3495 spin_unlock_shared(&ncc->spin);
3498 /* else cache miss */
3501 ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3503 * Cache hit (negative)
3505 spin_unlock_shared(&ncc->spin);
3508 spin_unlock_shared(&ncc->spin);
3516 info.nch_mount = nch->mount;
3517 info.nch_ncp = nch->ncp;
3518 mountlist_scan(cache_findmount_callback, &info,
3519 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
3524 * Negative lookups: We cache the originating {ncp,mp}. (mp) is
3525 * only used for pointer comparisons and is not
3526 * referenced (otherwise there would be dangling
3529 * Positive lookups: We cache the originating {ncp} and the target
3530 * (mp). (mp) is referenced.
3532 * Indeterminate:	If the match is undergoing an unmount we do
3533 * not cache it to avoid racing cache_unmounting(),
3534 * but still return the match.
3537 spin_lock(&ncc->spin);
3538 if (info.result == NULL) {
3539 if (ncc->isneg == 0 && ncc->mp)
3540 _cache_mntrel(ncc->mp);
3541 ncc->ncp = nch->ncp;
3542 ncc->mp = nch->mount;
3544 spin_unlock(&ncc->spin);
3545 } else if ((info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0) {
3546 if (ncc->isneg == 0 && ncc->mp)
3547 _cache_mntrel(ncc->mp);
3548 _cache_mntref(info.result);
3549 ncc->ncp = nch->ncp;
3550 ncc->mp = info.result;
3552 spin_unlock(&ncc->spin);
3554 spin_unlock(&ncc->spin);
3557 return(info.result);
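/*
 * Illustrative sketch (user-space, not part of this file): the
 * check/lock/re-check pattern cache_findmount() uses above on its small
 * frontend cache.  The unlocked peek is only a cheap filter; every field is
 * re-tested under the lock before the cached value is trusted.  The pthread
 * lock, the atomic key and the names are made up for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct map_entry {
	pthread_mutex_t lock;
	const void *_Atomic key;	/* atomic so the unlocked peek is well defined */
	void *value;			/* protected by lock */
};

static void *
cached_lookup(struct map_entry *e, const void *key)
{
	void *val = NULL;

	/* cheap unlocked filter; may be stale, so it is re-checked below */
	if (atomic_load_explicit(&e->key, memory_order_relaxed) != key)
		return (NULL);
	pthread_mutex_lock(&e->lock);
	if (atomic_load_explicit(&e->key, memory_order_relaxed) == key)
		val = e->value;
	pthread_mutex_unlock(&e->lock);
	return (val);
}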
3561 cache_dropmount(struct mount *mp)
3567 cache_ismounting(struct mount *mp)
3569 struct nchandle *nch = &mp->mnt_ncmounton;
3570 struct ncmount_cache *ncc;
3572 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3574 ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3575 spin_lock(&ncc->spin);
3577 ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3581 spin_unlock(&ncc->spin);
3586 cache_unmounting(struct mount *mp)
3588 struct nchandle *nch = &mp->mnt_ncmounton;
3589 struct ncmount_cache *ncc;
3591 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3592 if (ncc->isneg == 0 &&
3593 ncc->ncp == nch->ncp && ncc->mp == mp) {
3594 spin_lock(&ncc->spin);
3595 if (ncc->isneg == 0 &&
3596 ncc->ncp == nch->ncp && ncc->mp == mp) {
3601 spin_unlock(&ncc->spin);
3606 * Resolve an unresolved namecache entry, generally by looking it up.
3607 * The passed ncp must be locked and refd.
3609 * Theoretically since a vnode cannot be recycled while held, and since
3610 * the nc_parent chain holds its vnode as long as children exist, the
3611 * direct parent of the cache entry we are trying to resolve should
3612 * have a valid vnode. If not then generate an error that we can
3613 * determine is related to a resolver bug.
3615 * However, if a vnode was in the middle of a recyclement when the NCP
3616 * got locked, ncp->nc_vp might point to a vnode that is about to become
3617 * invalid. cache_resolve() handles this case by unresolving the entry
3618 * and then re-resolving it.
3620 * Note that successful resolution does not necessarily return an error
3621 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
3625 cache_resolve(struct nchandle *nch, struct ucred *cred)
3627 struct namecache *par_tmp;
3628 struct namecache *par;
3629 struct namecache *ncp;
3630 struct nchandle nctmp;
3637 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
3640 * If the ncp is already resolved we have nothing to do. However,
3641 * we do want to guarantee that a usable vnode is returned when
3642 * a vnode is present, so make sure it hasn't been reclaimed.
3644 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
3645 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
3646 _cache_setunresolved(ncp);
3647 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
3648 return (ncp->nc_error);
3652 * If the ncp was destroyed it will never resolve again. This
3653 * can basically only happen when someone is chdir'd into an
3654 * empty directory which is then rmdir'd. We want to catch this
3655 * here and not dive the VFS because the VFS might actually
3656 * have a way to re-resolve the disconnected ncp, which will
3657 * result in inconsistencies in the cdir/nch for proc->p_fd.
3659 if (ncp->nc_flag & NCF_DESTROYED)
3663 * Mount points need special handling because the parent does not
3664 * belong to the same filesystem as the ncp.
3666 if (ncp == mp->mnt_ncmountpt.ncp)
3667 return (cache_resolve_mp(mp));
3670 * We expect an unbroken chain of ncps to at least the mount point,
3671 * and even all the way to root (but this code doesn't have to go
3672 * past the mount point).
3674 if (ncp->nc_parent == NULL) {
3675 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
3676 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
3677 ncp->nc_error = EXDEV;
3678 return(ncp->nc_error);
3682 * The vp's of the parent directories in the chain are held via vhold()
3683 * due to the existence of the child, and should not disappear.
3684 * However, there are cases where they can disappear:
3686 * - due to filesystem I/O errors.
3687 * - due to NFS being stupid about tracking the namespace and
3688 * destroys the namespace for entire directories quite often.
3689 * - due to forced unmounts.
3690 * - due to an rmdir (parent will be marked DESTROYED)
3692 * When this occurs we have to track the chain backwards and resolve
3693 * it, looping until the resolver catches up to the current node. We
3694 * could recurse here but we might run ourselves out of kernel stack
3695 * so we do it in a more painful manner. This situation really should
3696 * not occur all that often, or if it does not have to go back too
3697 * many nodes to resolve the ncp.
3699 while ((dvp = cache_dvpref(ncp)) == NULL) {
3701 * This case can occur if a process is CD'd into a
3702 * directory which is then rmdir'd. If the parent is marked
3703 * destroyed there is no point trying to resolve it.
3705 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
3707 par = ncp->nc_parent;
3710 while ((par_tmp = par->nc_parent) != NULL &&
3711 par_tmp->nc_vp == NULL) {
3712 _cache_hold(par_tmp);
3713 _cache_lock(par_tmp);
3717 if (par->nc_parent == NULL) {
3718 kprintf("EXDEV case 2 %*.*s\n",
3719 par->nc_nlen, par->nc_nlen, par->nc_name);
3724 * The parent is not set in stone, ref and lock it to prevent
3725 * it from disappearing. Also note that due to renames it
3726 * is possible for our ncp to move and for par to no longer
3727 * be one of its parents. We resolve it anyway, the loop
3728 * will handle any moves.
3730 _cache_get(par); /* additional hold/lock */
3731 _cache_put(par); /* from earlier hold/lock */
3732 if (par == nch->mount->mnt_ncmountpt.ncp) {
3733 cache_resolve_mp(nch->mount);
3734 } else if ((dvp = cache_dvpref(par)) == NULL) {
3735 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
3739 if (par->nc_flag & NCF_UNRESOLVED) {
3742 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
3746 if ((error = par->nc_error) != 0) {
3747 if (par->nc_error != EAGAIN) {
3748 kprintf("EXDEV case 3 %*.*s error %d\n",
3749 par->nc_nlen, par->nc_nlen, par->nc_name,
3754 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
3755 par, par->nc_nlen, par->nc_nlen, par->nc_name);
3762 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
3763 * ncp's and reattach them. If this occurs the original ncp is marked
3764 * EAGAIN to force a relookup.
3766 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
3767 * ncp must already be resolved.
3772 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
3775 ncp->nc_error = EPERM;
3777 if (ncp->nc_error == EAGAIN) {
3778 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
3779 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
3782 return(ncp->nc_error);
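/*
 * Illustrative sketch (user-space, not part of this file): the restart loop
 * cache_resolve() uses above when a parent in the chain has become unusable.
 * Walk up to the topmost node whose parent is already usable, resolve that
 * one, and loop so the resolver works its way back down to the original node
 * without recursing.  'struct tnode' and resolve_one() are made up.
 */
#include <stddef.h>

struct tnode {
	struct tnode *parent;
	int resolved;
};

static void
resolve_chain(struct tnode *ncp, void (*resolve_one)(struct tnode *))
{
	struct tnode *par;

	while (!ncp->resolved) {
		/* find the topmost unresolved node with a usable parent */
		par = ncp;
		while (par->parent != NULL && !par->parent->resolved)
			par = par->parent;
		resolve_one(par);
		if (!par->resolved)
			break;		/* resolver failed; give up */
	}
}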
3786 * Resolve the ncp associated with a mount point. Such ncp's almost always
3787 * remain resolved and this routine is rarely called.  NFS MPs tend to force
3788 * re-resolution more often due to their mac-truck-smash-the-namecache
3789 * method of tracking namespace changes.
3791 * The semantics for this call are that the passed ncp must be locked on
3792 * entry and will be locked on return. However, if we actually have to
3793 * resolve the mount point we temporarily unlock the entry in order to
3794 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
3795 * the unlock we have to recheck the flags after we relock.
3798 cache_resolve_mp(struct mount *mp)
3800 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
3804 KKASSERT(mp != NULL);
3807 * If the ncp is already resolved we have nothing to do. However,
3808 * we do want to guarantee that a usable vnode is returned when
3809 * a vnode is present, so make sure it hasn't been reclaimed.
3811 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
3812 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
3813 _cache_setunresolved(ncp);
3816 if (ncp->nc_flag & NCF_UNRESOLVED) {
3818 while (vfs_busy(mp, 0))
3820 error = VFS_ROOT(mp, &vp);
3824 * recheck the ncp state after relocking.
3826 if (ncp->nc_flag & NCF_UNRESOLVED) {
3827 ncp->nc_error = error;
3829 _cache_setvp(mp, ncp, vp);
3832 kprintf("[diagnostic] cache_resolve_mp: failed"
3833 " to resolve mount %p err=%d ncp=%p\n",
3835 _cache_setvp(mp, ncp, NULL);
3837 } else if (error == 0) {
3842 return(ncp->nc_error);
3846 * Clean out negative cache entries when too many have accumulated.
3849 _cache_cleanneg(long count)
3851 struct pcpu_ncache *pn;
3852 struct namecache *ncp;
3853 static uint32_t neg_rover;
3857 n = neg_rover++; /* SMP heuristical, race ok */
3859 n = n % (uint32_t)ncpus;
3862 * Normalize vfscache_negs and count. count is sometimes based
3863 * on vfscache_negs.  vfscache_negs is heuristic and can sometimes
3864 * have crazy values.
3866 vnegs = vfscache_negs;
3868 if (vnegs <= MINNEG)
3873 pn = &pcpu_ncache[n];
3874 spin_lock(&pn->neg_spin);
3875 count = pn->neg_count * count / vnegs + 1;
3876 spin_unlock(&pn->neg_spin);
3879 * Attempt to clean out the specified number of negative cache
3883 spin_lock(&pn->neg_spin);
3884 ncp = TAILQ_FIRST(&pn->neg_list);
3886 spin_unlock(&pn->neg_spin);
3889 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
3890 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
3892 spin_unlock(&pn->neg_spin);
3895 * This can race, so we must re-check that the ncp
3896 * is still on the per-cpu neg_list after successfully locking it.
3898 if (_cache_lock_special(ncp) == 0) {
3899 if (ncp->nc_vp == NULL &&
3900 (ncp->nc_flag & NCF_UNRESOLVED) == 0) {
3901 ncp = cache_zap(ncp, 1);
3916 * Clean out positive cache entries when too many have accumulated.
3919 _cache_cleanpos(long count)
3921 static volatile int rover;
3922 struct nchash_head *nchpp;
3923 struct namecache *ncp;
3927 * Attempt to clean out the specified number of positive cache
3931 rover_copy = ++rover; /* MPSAFEENOUGH */
3933 nchpp = NCHHASH(rover_copy);
3935 if (TAILQ_FIRST(&nchpp->list) == NULL) {
3941 * Cycle ncp on list, ignore and do not move DUMMY
3942 * ncps. These are temporary list iterators.
3944 * We must cycle the ncp to the end of the list to
3945 * ensure that all ncp's have an equal chance of
3948 spin_lock(&nchpp->spin);
3949 ncp = TAILQ_FIRST(&nchpp->list);
3950 while (ncp && (ncp->nc_flag & NCF_DUMMY))
3951 ncp = TAILQ_NEXT(ncp, nc_hash);
3953 TAILQ_REMOVE(&nchpp->list, ncp, nc_hash);
3954 TAILQ_INSERT_TAIL(&nchpp->list, ncp, nc_hash);
3957 spin_unlock(&nchpp->spin);
3960 if (_cache_lock_special(ncp) == 0) {
3961 ncp = cache_zap(ncp, 1);
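/*
 * Illustrative sketch (user-space, not part of this file): the
 * rotate-to-tail step used by _cache_cleanneg() and _cache_cleanpos() above
 * so every entry on a list eventually gets its turn at the front.  Uses the
 * same <sys/queue.h> TAILQ macros as the code above; struct item is made up.
 */
#include <sys/queue.h>
#include <stddef.h>

struct item {
	TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(itemlist, item);

static struct item *
rotate_front_to_tail(struct itemlist *list)
{
	struct item *it;

	if ((it = TAILQ_FIRST(list)) != NULL) {
		TAILQ_REMOVE(list, it, link);
		TAILQ_INSERT_TAIL(list, it, link);
	}
	return (it);		/* candidate to examine on this pass */
}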
3973 * This is a kitchen sink function to clean out ncps which we
3974 * tried to zap from cache_drop() but failed because we were
3975 * unable to acquire the parent lock.
3977 * Such entries can also be removed via cache_inval_vp(), such
3978 * as when unmounting.
3981 _cache_cleandefered(void)
3983 struct nchash_head *nchpp;
3984 struct namecache *ncp;
3985 struct namecache dummy;
3989 * Create a list iterator. DUMMY indicates that this is a list
3990 * iterator, DESTROYED prevents matches by lookup functions.
3993 pcpu_ncache[mycpu->gd_cpuid].numdefered = 0;
3994 bzero(&dummy, sizeof(dummy));
3995 dummy.nc_flag = NCF_DESTROYED | NCF_DUMMY;
3998 for (i = 0; i <= nchash; ++i) {
3999 nchpp = &nchashtbl[i];
4001 spin_lock(&nchpp->spin);
4002 TAILQ_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
4004 while ((ncp = TAILQ_NEXT(ncp, nc_hash)) != NULL) {
4005 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
4007 TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
4008 TAILQ_INSERT_AFTER(&nchpp->list, ncp, &dummy, nc_hash);
4010 spin_unlock(&nchpp->spin);
4011 if (_cache_lock_nonblock(ncp) == 0) {
4012 ncp->nc_flag &= ~NCF_DEFEREDZAP;
4016 spin_lock(&nchpp->spin);
4019 TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
4020 spin_unlock(&nchpp->spin);
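/*
 * Illustrative sketch (user-space, not part of this file): the dummy
 * list-iterator technique _cache_cleandefered() uses above.  A marker node
 * is inserted in the list; the scan can then drop the list lock, work on an
 * element, and resume from the marker, which is advanced past each element
 * as it is visited.  Locking is omitted to keep the sketch short; the types
 * and names are made up for illustration.
 */
#include <sys/queue.h>
#include <stddef.h>

struct elem {
	TAILQ_ENTRY(elem) link;
	int is_marker;
};
TAILQ_HEAD(elemlist, elem);

static void
scan_with_marker(struct elemlist *list, void (*process)(struct elem *))
{
	struct elem marker = { .is_marker = 1 };
	struct elem *e;

	TAILQ_INSERT_HEAD(list, &marker, link);
	while ((e = TAILQ_NEXT(&marker, link)) != NULL) {
		/* advance the marker past 'e' so the scan can resume after it */
		TAILQ_REMOVE(list, &marker, link);
		TAILQ_INSERT_AFTER(list, e, &marker, link);
		if (!e->is_marker)
			process(e);	/* a lock could be dropped around this */
	}
	TAILQ_REMOVE(list, &marker, link);
}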
4025 * Name cache initialization, from vfsinit() when we are booting
4030 struct pcpu_ncache *pn;
4035 * Per-cpu accounting and negative hit list
4037 pcpu_ncache = kmalloc(sizeof(*pcpu_ncache) * ncpus,
4038 M_VFSCACHE, M_WAITOK|M_ZERO);
4039 for (i = 0; i < ncpus; ++i) {
4040 pn = &pcpu_ncache[i];
4041 TAILQ_INIT(&pn->neg_list);
4042 spin_init(&pn->neg_spin, "ncneg");
4046 * Initialize per-cpu namecache effectiveness statistics.
4048 for (i = 0; i < ncpus; ++i) {
4049 gd = globaldata_find(i);
4050 gd->gd_nchstats = &nchstats[i];
4054 * Create a generous namecache hash table
4056 nchashtbl = hashinit_ext(vfs_inodehashsize(),
4057 sizeof(struct nchash_head),
4058 M_VFSCACHE, &nchash);
4059 for (i = 0; i <= (int)nchash; ++i) {
4060 TAILQ_INIT(&nchashtbl[i].list);
4061 spin_init(&nchashtbl[i].spin, "nchinit_hash");
4063 for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
4064 spin_init(&ncmount_cache[i].spin, "nchinit_cache");
4065 nclockwarn = 5 * hz;
4069 * Called from start_init() to bootstrap the root filesystem. Returns
4070 * a referenced, unlocked namecache record.
4073 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
4075 nch->ncp = cache_alloc(0);
4079 _cache_setvp(nch->mount, nch->ncp, vp);
4083 * vfs_cache_setroot()
4085 * Create an association between the root of our namecache and
4086 * the root vnode. This routine may be called several times during
4089 * If the caller intends to save the returned namecache pointer somewhere
4090 * it must cache_hold() it.
4093 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
4096 struct nchandle onch;
4104 cache_zero(&rootnch);
4112 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
4113 * topology and is being removed as quickly as possible. The new VOP_N*()
4114 * API calls are required to make specific adjustments using the supplied
4115 * ncp pointers rather than just bogusly purging random vnodes.
4117 * Invalidate all namecache entries to a particular vnode as well as
4118 * any direct children of that vnode in the namecache. This is a
4119 * 'catch all' purge used by filesystems that do not know any better.
4121 * Note that the linkage between the vnode and its namecache entries will
4122 * be removed, but the namecache entries themselves might stay put due to
4123 * active references from elsewhere in the system or due to the existence of
4124 * the children. The namecache topology is left intact even if we do not
4125 * know what the vnode association is. Such entries will be marked
4129 cache_purge(struct vnode *vp)
4131 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
4134 static int disablecwd;
4135 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
4138 static u_long numcwdcalls;
4139 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
4140 "Number of current directory resolution calls");
4141 static u_long numcwdfailnf;
4142 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
4143 "Number of current directory failures due to lack of file");
4144 static u_long numcwdfailsz;
4145 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
4146 "Number of current directory failures due to large result");
4147 static u_long numcwdfound;
4148 SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
4149 "Number of current directory resolution successes");
4155 sys___getcwd(struct __getcwd_args *uap)
4165 buflen = uap->buflen;
4168 if (buflen > MAXPATHLEN)
4169 buflen = MAXPATHLEN;
4171 buf = kmalloc(buflen, M_TEMP, M_WAITOK);
4172 bp = kern_getcwd(buf, buflen, &error);
4174 error = copyout(bp, uap->buf, strlen(bp) + 1);
4180 kern_getcwd(char *buf, size_t buflen, int *error)
4182 struct proc *p = curproc;
4184 int i, slash_prefixed;
4185 struct filedesc *fdp;
4186 struct nchandle nch;
4187 struct namecache *ncp;
4196 nch = fdp->fd_ncdir;
4201 while (ncp && (ncp != fdp->fd_nrdir.ncp ||
4202 nch.mount != fdp->fd_nrdir.mount)
4205 * While traversing upwards if we encounter the root
4206 * of the current mount we have to skip to the mount point
4207 * in the underlying filesystem.
4209 if (ncp == nch.mount->mnt_ncmountpt.ncp) {
4210 nch = nch.mount->mnt_ncmounton;
4219 * Prepend the path segment
4221 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
4228 *--bp = ncp->nc_name[i];
4240 * Go up a directory. This isn't a mount point so we don't
4241 * have to check again.
4243 while ((nch.ncp = ncp->nc_parent) != NULL) {
4244 if (ncp_shared_lock_disable)
4247 _cache_lock_shared(ncp);
4248 if (nch.ncp != ncp->nc_parent) {
4252 _cache_hold(nch.ncp);
4265 if (!slash_prefixed) {
4283 * Thus begins the fullpath magic.
4285 * The passed nchp is referenced but not locked.
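/*
 * Illustrative sketch (user-space, not part of this file): the
 * build-backwards technique both kern_getcwd() above and cache_fullpath()
 * below use.  Components are discovered leaf-first, so each one is prepended
 * just in front of the text already placed at the tail of the buffer instead
 * of being appended and reversed later.  The caller is assumed to start with
 * *bpp pointing at a NUL in the last byte of buf; the names are made up.
 */
#include <stddef.h>
#include <string.h>

/* Prepend "/<name>" in front of *bpp; returns -1 if buf is too small. */
static int
prepend_component(char *buf, char **bpp, const char *name, size_t namelen)
{
	char *bp = *bpp;

	if ((size_t)(bp - buf) < namelen + 1)
		return (-1);
	bp -= namelen;
	memcpy(bp, name, namelen);
	*--bp = '/';
	*bpp = bp;
	return (0);
}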
4287 static int disablefullpath;
4288 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
4289 &disablefullpath, 0,
4290 "Disable fullpath lookups");
4293 cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
4294 char **retbuf, char **freebuf, int guess)
4296 struct nchandle fd_nrdir;
4297 struct nchandle nch;
4298 struct namecache *ncp;
4299 struct mount *mp, *new_mp;
4308 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
4309 bp = buf + MAXPATHLEN - 1;
4312 fd_nrdir = *nchbase;
4314 fd_nrdir = p->p_fd->fd_nrdir;
4324 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
4328 * If we are asked to guess the upwards path, we do so whenever
4329 * we encounter an ncp marked as a mountpoint. We try to find
4330 * the actual mountpoint by finding the mountpoint with this
4333 if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
4334 new_mp = mount_get_by_nc(ncp);
4337 * While traversing upwards if we encounter the root
4338 * of the current mount we have to skip to the mount point.
4340 if (ncp == mp->mnt_ncmountpt.ncp) {
4344 nch = new_mp->mnt_ncmounton;
4354 * Prepend the path segment
4356 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
4362 *--bp = ncp->nc_name[i];
4373 * Go up a directory. This isn't a mount point so we don't
4374 * have to check again.
4376 * We can only safely access nc_parent with ncp held locked.
4378 while ((nch.ncp = ncp->nc_parent) != NULL) {
4380 if (nch.ncp != ncp->nc_parent) {
4384 _cache_hold(nch.ncp);
4397 if (!slash_prefixed) {
4415 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
4416 char **freebuf, int guess)
4418 struct namecache *ncp;
4419 struct nchandle nch;
4423 if (disablefullpath)
4429 /* vn is NULL, client wants us to use p->p_textvp */
4431 if ((vn = p->p_textvp) == NULL)
4434 spin_lock_shared(&vn->v_spin);
4435 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
4440 spin_unlock_shared(&vn->v_spin);
4444 spin_unlock_shared(&vn->v_spin);
4447 nch.mount = vn->v_mount;
4448 error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
4454 vfscache_rollup_cpu(struct globaldata *gd)
4456 struct pcpu_ncache *pn;
4459 if (pcpu_ncache == NULL)
4461 pn = &pcpu_ncache[gd->gd_cpuid];
4463 if (pn->vfscache_count) {
4464 count = atomic_swap_long(&pn->vfscache_count, 0);
4465 atomic_add_long(&vfscache_count, count);
4467 if (pn->vfscache_leafs) {
4468 count = atomic_swap_long(&pn->vfscache_leafs, 0);
4469 atomic_add_long(&vfscache_leafs, count);
4471 if (pn->vfscache_negs) {
4472 count = atomic_swap_long(&pn->vfscache_negs, 0);
4473 atomic_add_long(&vfscache_negs, count);
4475 if (pn->numdefered) {
4476 count = atomic_swap_long(&pn->numdefered, 0);
4477 atomic_add_long(&numdefered, count);
4483 vfscache_rollup_all(void)
4487 for (n = 0; n < ncpus; ++n)
4488 vfscache_rollup_cpu(globaldata_find(n));
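/*
 * Illustrative sketch (user-space C11, not part of this file): the per-cpu
 * rollup pattern used by vfscache_rollup_cpu() above.  Each per-cpu counter
 * is drained with an atomic swap-to-zero and the captured value is added to
 * the global total, so no increments are lost even while other cpus keep
 * counting.  The names are made up for illustration.
 */
#include <stdatomic.h>

static void
rollup_counter(atomic_long *percpu, atomic_long *global)
{
	long count;

	count = atomic_exchange(percpu, 0);
	if (count != 0)
		atomic_fetch_add(global, count);
}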