2 * Copyright (c) 2003-2020 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 #include <sys/param.h>
66 #include <sys/systm.h>
68 #include <sys/kernel.h>
69 #include <sys/sysctl.h>
70 #include <sys/mount.h>
71 #include <sys/vnode.h>
72 #include <sys/malloc.h>
73 #include <sys/sysmsg.h>
74 #include <sys/spinlock.h>
76 #include <sys/nlookup.h>
77 #include <sys/filedesc.h>
78 #include <sys/fnv_hash.h>
79 #include <sys/globaldata.h>
80 #include <sys/kern_syscall.h>
81 #include <sys/dirent.h>
84 #include <sys/spinlock2.h>
86 #define MAX_RECURSION_DEPTH 64
89 * Random lookups in the cache are accomplished with a hash table using
90 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock,
91 * but we use the ncp->update counter trick to avoid acquiring any
92 * contestable spin-locks during a lookup.
94 * Negative entries may exist and correspond to resolved namecache
95 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT
96 * will be set if the entry corresponds to a whited-out directory entry
97 * (versus simply not finding the entry at all). pcpu_ncache[n].neg_list
98 * is locked via pcpu_ncache[n].neg_spin.
102 * (1) ncp's typically have at least a nc_refs of 1, and usually 2. One
103 * is applicable to direct lookups via the hash table nchpp or via
104 * nc_list (the two are added or removed together). Removal of the ncp
105 * from the hash table drops this reference. The second is applicable
106 * to vp->v_namecache linkages (or negative list linkages), and removal
107 * of the ncp from these lists drops this reference.
109 * On the 1->0 transition of nc_refs the ncp can no longer be referenced
110 * and must be destroyed. No other thread should have access to it at
111 * this point so it can be safely locked and freed without any deadlock
114 * The 1->0 transition can occur at almost any juncture and so cache_drop()
115 * deals with it directly.
117 * (2) Once the 1->0 transition occurs, the entity that caused the transition
118 * will be responsible for destroying the ncp. The ncp cannot be on any
119 * list or hash at this time, or be held by anyone other than the caller
120 * responsible for the transition.
122 * (3) A ncp must be locked in order to modify it.
124 * (5) ncp locks are ordered, child-to-parent. Child first, then parent.
125 * This may seem backwards but forward-scans use the hash table and thus
126 * can hold the parent unlocked while traversing downward. Deletions,
127 * on the other hand, tend to propagate bottom-up since the ref on the
128 * parent is dropped as the children go away.
130 * (6) Both parent and child must be locked in order to enter the child onto
131 * the parent's nc_list.
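/*
 * Editor's note (illustrative sketch, not part of the original source):
 * per rule (5), a caller needing both a child and its parent locked
 * acquires them child-first:
 *
 *	_cache_lock(kid);			child first
 *	_cache_lock(kid->nc_parent);		then parent
 *	...
 *	_cache_unlock(kid->nc_parent);
 *	_cache_unlock(kid);
 *
 * Forward scans instead locate children via the hash table and can leave
 * the parent unlocked while traversing downward.
 */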
135 * Structures associated with name caching.
137 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
140 #define NCMOUNT_NUMCACHE (16384) /* power of 2 */
141 #define NCMOUNT_SET (8) /* power of 2 */
143 MALLOC_DEFINE_OBJ(M_VFSCACHE, sizeof(struct namecache),
144 "namecache", "namecache entries");
145 MALLOC_DEFINE(M_VFSCACHEAUX, "namecachestr", "namecache strings");
147 TAILQ_HEAD(nchash_list, namecache);
150 * Don't cachealign, but at least pad to 32 bytes so entries
151 * don't cross a cache line.
154 struct nchash_list list; /* 16 bytes */
155 struct spinlock spin; /* 8 bytes */
156 long pad01; /* 8 bytes */
159 struct ncmount_cache {
160 struct spinlock spin;
161 struct namecache *ncp;
163 struct mount *mp_target;
171 struct spinlock umount_spin; /* cache_findmount/interlock */
172 struct spinlock neg_spin; /* for neg_list and neg_count */
173 struct namecache_list neg_list;
180 long inv_kid_quick_count;
181 long inv_ncp_quick_count;
182 long clean_pos_count;
183 long clean_neg_count;
186 __read_mostly static struct nchash_head *nchashtbl;
187 __read_mostly static struct pcpu_ncache *pcpu_ncache;
188 static struct ncmount_cache ncmount_cache[NCMOUNT_NUMCACHE];
191 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server
192 * to create the namecache infrastructure leading to a dangling vnode.
194 * 0 Only errors are reported
195 * 1 Successes are reported
196 * 2 Successes + the whole directory scan is reported
197 * 3 Force the directory scan code to run as if the parent vnode did not
198 * have a namecache record, even if it does have one.
200 __read_mostly int ncvp_debug;
201 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
202 "Namecache debug level (0-3)");
204 __read_mostly static u_long nchash; /* size of hash table */
205 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
206 "Size of namecache hash table");
208 __read_mostly static int ncnegflush = 10; /* burst for negative flush */
209 SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
210 "Batch flush negative entries");
212 __read_mostly static int ncposflush = 10; /* burst for positive flush */
213 SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
214 "Batch flush positive entries");
216 __read_mostly static int ncnegfactor = 16; /* ratio of negative entries */
217 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
218 "Ratio of negative namecache entries");
220 __read_mostly static int ncposfactor = 16; /* ratio of unres+leaf entries */
221 SYSCTL_INT(_debug, OID_AUTO, ncposfactor, CTLFLAG_RW, &ncposfactor, 0,
222 "Ratio of unresolved leaf namecache entries");
224 __read_mostly static int nclockwarn; /* warn on locked entries in ticks */
225 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
226 "Warn on locked namecache entries in ticks");
228 __read_mostly static int ncposlimit; /* limit on positive cache entries */
229 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
230 "Limit on the number of positive namecache entries");
232 __read_mostly static int ncp_shared_lock_disable = 0;
233 SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
234 &ncp_shared_lock_disable, 0, "Disable shared namecache locks");
236 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
237 "sizeof(struct vnode)");
238 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
239 "sizeof(struct namecache)");
241 __read_mostly static int ncmount_cache_enable = 1;
242 SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
243 &ncmount_cache_enable, 0, "mount point cache");
245 static __inline void _cache_drop(struct namecache *ncp);
246 static int cache_resolve_mp(struct mount *mp, int adjgen);
247 static int cache_findmount_callback(struct mount *mp, void *data);
248 static void _cache_setunresolved(struct namecache *ncp, int adjgen);
249 static void _cache_cleanneg(long count);
250 static void _cache_cleanpos(long ucount, long xcount);
251 static void _cache_cleandefered(void);
252 static void _cache_unlink(struct namecache *ncp);
255 * The new name cache statistics (these are rolled up globals and not
256 * modified in the critical path, see struct pcpu_ncache).
258 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
259 static long vfscache_negs;
260 SYSCTL_LONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &vfscache_negs, 0,
261 "Number of negative namecache entries");
262 static long vfscache_count;
263 SYSCTL_LONG(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &vfscache_count, 0,
264 "Number of namecache entries");
265 static long vfscache_leafs;
266 SYSCTL_LONG(_vfs_cache, OID_AUTO, numleafs, CTLFLAG_RD, &vfscache_leafs, 0,
267 "Number of leaf namecache entries");
268 static long vfscache_unres;
269 SYSCTL_LONG(_vfs_cache, OID_AUTO, numunres, CTLFLAG_RD, &vfscache_unres, 0,
270 "Number of unresolved leaf namecache entries");
272 static long inv_kid_quick_count;
273 SYSCTL_LONG(_vfs_cache, OID_AUTO, inv_kid_quick_count, CTLFLAG_RD,
274 &inv_kid_quick_count, 0,
275 "quick kid invalidations");
276 static long inv_ncp_quick_count;
277 SYSCTL_LONG(_vfs_cache, OID_AUTO, inv_ncp_quick_count, CTLFLAG_RD,
278 &inv_ncp_quick_count, 0,
279 "quick ncp invalidations");
280 static long clean_pos_count;
281 SYSCTL_LONG(_vfs_cache, OID_AUTO, clean_pos_count, CTLFLAG_RD,
283 "positive ncp cleanings");
284 static long clean_neg_count;
285 SYSCTL_LONG(_vfs_cache, OID_AUTO, clean_neg_count, CTLFLAG_RD,
287 "negative ncp cleanings");
289 static long numdefered;
290 SYSCTL_LONG(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
291 "Number of deferred namecache entries");
294 * Returns the number of basic references expected on the ncp, not
295 * including any children. 1 for the natural ref, and an additional ref
296 * if the ncp is resolved (representing a positive or negative hit).
299 ncpbaserefs(struct namecache *ncp)
301 return (1 + ((ncp->nc_flag & NCF_UNRESOLVED) == 0));
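/*
 * Editor's note (not part of the original source): an unresolved ncp thus
 * has an expected base count of 1 (the natural ref), while a resolved ncp,
 * positive or negative, has 2 (natural ref plus the vp->v_namecache or
 * neg_list association).  Callers such as cache_inval_vp_quick() compare
 * nc_refs against this value to detect unexpected extra references.
 */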
304 struct nchstats nchstats[SMP_MAXCPU];
306 * Export VFS cache effectiveness statistics to user-land.
308 * The statistics are left for aggregation to user-land so
309 * neat things can be achieved, like observing per-CPU cache
313 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
315 struct globaldata *gd;
319 for (i = 0; i < ncpus; ++i) {
320 gd = globaldata_find(i);
321 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
322 sizeof(struct nchstats))))
328 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
329 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
331 static int cache_zap(struct namecache *ncp);
334 * Cache mount points and namecache records in order to avoid unnecessary
335 * atomic ops on mnt_refs and ncp->refs. This improves concurrent SMP
336 * performance and is particularly important on multi-socket systems to
337 * reduce cache-line ping-ponging.
339 * Try to keep the pcpu structure within one cache line (~64 bytes).
341 #define MNTCACHE_COUNT 32 /* power of 2, multiple of SET */
342 #define MNTCACHE_SET 8 /* set associativity */
344 struct mntcache_elm {
345 struct namecache *ncp;
352 struct mntcache_elm array[MNTCACHE_COUNT];
355 static struct mntcache pcpu_mntcache[MAXCPU];
359 _cache_ncp_gen_enter(struct namecache *ncp)
361 ncp->nc_generation += 2;
367 _cache_ncp_gen_exit(struct namecache *ncp)
370 ncp->nc_generation += 2;
375 struct mntcache_elm *
376 _cache_mntcache_hash(void *ptr)
378 struct mntcache_elm *elm;
381 hv = iscsi_crc32(&ptr, sizeof(ptr)) & (MNTCACHE_COUNT - 1);
382 elm = &pcpu_mntcache[mycpu->gd_cpuid].array[hv & ~(MNTCACHE_SET - 1)];
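/*
 * Editor's note (illustrative, not part of the original source): with
 * MNTCACHE_COUNT = 32 and MNTCACHE_SET = 8 the crc is first masked down
 * to a slot index 0..31 and then rounded down to a set boundary.  For
 * example hv = 21 (binary 10101) yields 21 & ~7 = 16, so the caller
 * probes the 8-element set at array[16..23] on the current cpu.
 */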
389 _cache_mntref(struct mount *mp)
391 struct mntcache_elm *elm;
395 elm = _cache_mntcache_hash(mp);
396 for (i = 0; i < MNTCACHE_SET; ++i) {
398 mpr = atomic_swap_ptr((void *)&elm->mp, NULL);
399 if (__predict_true(mpr == mp))
402 atomic_add_int(&mpr->mnt_refs, -1);
406 atomic_add_int(&mp->mnt_refs, 1);
411 _cache_mntrel(struct mount *mp)
413 struct mntcache_elm *elm;
414 struct mntcache_elm *best;
420 elm = _cache_mntcache_hash(mp);
422 for (i = 0; i < MNTCACHE_SET; ++i) {
423 if (elm->mp == NULL) {
424 mpr = atomic_swap_ptr((void *)&elm->mp, mp);
425 if (__predict_false(mpr != NULL)) {
426 atomic_add_int(&mpr->mnt_refs, -1);
431 delta1 = ticks - best->ticks;
432 delta2 = ticks - elm->ticks;
433 if (delta2 > delta1 || delta1 < -1 || delta2 < -1)
437 mpr = atomic_swap_ptr((void *)&best->mp, mp);
440 atomic_add_int(&mpr->mnt_refs, -1);
444 * Clears all cached mount points on all cpus. This routine should only
445 * be called when we are waiting for a mount to clear, e.g. so we can
449 cache_clearmntcache(struct mount *target __unused)
453 for (n = 0; n < ncpus; ++n) {
454 struct mntcache *cache = &pcpu_mntcache[n];
455 struct mntcache_elm *elm;
456 struct namecache *ncp;
460 for (i = 0; i < MNTCACHE_COUNT; ++i) {
461 elm = &cache->array[i];
463 mp = atomic_swap_ptr((void *)&elm->mp, NULL);
465 atomic_add_int(&mp->mnt_refs, -1);
468 ncp = atomic_swap_ptr((void *)&elm->ncp, NULL);
477 * Namespace locking. The caller must already hold a reference to the
478 * namecache structure in order to lock/unlock it. The controlling entity
479 * in a 1->0 transition does not need to lock the ncp to dispose of it,
480 * as nobody else will have visibility to it at that point.
482 * Note that holding a locked namecache structure prevents other threads
483 * from making namespace changes (e.g. deleting or creating), prevents
484 * vnode association state changes by other threads, and prevents the
485 * namecache entry from being resolved or unresolved by other threads.
487 * An exclusive lock owner has full authority to associate/disassociate
488 * vnodes and resolve/unresolve the locked ncp.
490 * A shared lock owner only has authority to acquire the underlying vnode,
493 * The primary lock field is nc_lockstatus. nc_locktd is set after the
494 * fact (when locking) or cleared prior to unlocking.
496 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
497 * or recycled, but it does NOT help you if the vnode had already
498 * initiated a recyclement. If this is important, use cache_get()
499 * rather than cache_lock() (and deal with the differences in the
500 * way the refs counter is handled). Or, alternatively, make an
501 * unconditional call to cache_validate() or cache_resolve()
502 * after cache_lock() returns.
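/*
 * Editor's illustrative sketch (not part of the original source): a caller
 * that must not act on an already-reclaiming vnode can follow the advice
 * above; nch is assumed referenced, cred and dummy_gen are the caller's
 * credential and generation scratch variable:
 *
 *	cache_lock(&nch);
 *	if (cache_resolve(&nch, &dummy_gen, cred) == 0) {
 *		... nch.ncp->nc_vp is now definitively usable ...
 *	}
 *	cache_unlock(&nch);
 *
 * Alternatively use cache_get()/cache_put(), which handle the reclaim
 * race internally.
 */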
506 _cache_lock(struct namecache *ncp)
511 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE);
512 while (__predict_false(error == EWOULDBLOCK)) {
514 didwarn = ticks - nclockwarn;
515 kprintf("[diagnostic] cache_lock: "
518 curthread->td_comm, ncp,
519 ncp->nc_nlen, ncp->nc_nlen,
522 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_TIMELOCK);
524 if (__predict_false(didwarn)) {
525 kprintf("[diagnostic] cache_lock: "
526 "%s unblocked %*.*s after %d secs\n",
528 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
529 (int)(ticks - didwarn) / hz);
534 * Release a previously acquired lock.
536 * A concurrent shared-lock acquisition or acquisition/release can
537 * race bit 31 so only drop the ncp if bit 31 was set.
541 _cache_unlock(struct namecache *ncp)
543 lockmgr(&ncp->nc_lock, LK_RELEASE);
547 * Lock ncp exclusively, non-blocking. Return 0 on success.
551 _cache_lock_nonblock(struct namecache *ncp)
555 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_NOWAIT);
556 if (__predict_false(error != 0)) {
563 * This is a special form of _cache_lock() which only succeeds if
564 * it can get a pristine, non-recursive lock. The caller must have
565 * already ref'd the ncp.
567 * On success the ncp will be locked, on failure it will not. The
568 * ref count does not change either way.
570 * We want _cache_lock_special() (on success) to return a definitively
571 * usable vnode or a definitively unresolved ncp.
575 _cache_lock_special(struct namecache *ncp)
577 if (_cache_lock_nonblock(ncp) == 0) {
578 if (lockmgr_oneexcl(&ncp->nc_lock)) {
579 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
580 _cache_setunresolved(ncp, 1);
589 * Shared lock, guarantees vp held
591 * The shared lock holds vp on the 0->1 transition. It is possible to race
592 * another shared lock release, preventing the other release from dropping
593 * the vnode and clearing bit 31.
595 * If it is not set then we are responsible for setting it, and this
596 * responsibility does not race with anyone else.
600 _cache_lock_shared(struct namecache *ncp)
605 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK);
606 while (__predict_false(error == EWOULDBLOCK)) {
608 didwarn = ticks - nclockwarn;
609 kprintf("[diagnostic] cache_lock_shared: "
612 curthread->td_comm, ncp,
613 ncp->nc_nlen, ncp->nc_nlen,
616 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK);
618 if (__predict_false(didwarn)) {
619 kprintf("[diagnostic] cache_lock_shared: "
620 "%s unblocked %*.*s after %d secs\n",
622 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
623 (int)(ticks - didwarn) / hz);
628 * Shared lock, guarantees vp held. Non-blocking. Returns 0 on success
632 _cache_lock_shared_nonblock(struct namecache *ncp)
636 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_NOWAIT);
637 if (__predict_false(error != 0)) {
644 * This function tries to get a shared lock but will back-off to an
647 * (1) Some other thread is trying to obtain an exclusive lock
648 * (to prevent the exclusive requester from getting livelocked out
649 * by many shared locks).
651 * (2) The current thread already owns an exclusive lock (to avoid
654 * WARNING! On machines with lots of cores we really want to try hard to
655 * get a shared lock or concurrent path lookups can chain-react
656 * into a very high-latency exclusive lock.
658 * This is very evident in dsynth's initial scans.
662 _cache_lock_shared_special(struct namecache *ncp)
665 * Only honor a successful shared lock (returning 0) if there is
666 * no exclusive request pending and the vnode, if present, is not
667 * in a reclaimed state.
669 if (_cache_lock_shared_nonblock(ncp) == 0) {
670 if (__predict_true(!lockmgr_exclpending(&ncp->nc_lock))) {
671 if (ncp->nc_vp == NULL ||
672 (ncp->nc_vp->v_flag & VRECLAIMED) == 0) {
681 * Non-blocking shared lock failed. If we already own the exclusive
682 * lock just acquire another exclusive lock (instead of deadlocking).
683 * Otherwise acquire a shared lock.
685 if (lockstatus(&ncp->nc_lock, curthread) == LK_EXCLUSIVE) {
689 _cache_lock_shared(ncp);
697 * (v) LK_SHARED or LK_EXCLUSIVE
701 _cache_lockstatus(struct namecache *ncp)
705 status = lockstatus(&ncp->nc_lock, curthread);
706 if (status == LK_EXCLOTHER)
712 * cache_hold() and cache_drop() prevent the premature deletion of a
713 * namecache entry but do not prevent operations (such as zapping) on
714 * that namecache entry.
716 * This routine may only be called from outside this source module if
717 * nc_refs is already deterministically at least 1, such as being
718 * associated with e.g. a process, file descriptor, or some other entity.
720 * Only the above situations, similar situations within this module where
721 * the ref count is deterministically at least 1, or when the ncp is found
722 * via the nchpp (hash table) lookup, can bump nc_refs.
724 * Very specifically, a ncp found via nc_list CANNOT bump nc_refs. It
725 * can still be removed from the nc_list, however, as long as the caller
726 * can acquire its lock (in the wrong order).
728 * This is a rare case where callers are allowed to hold a spinlock,
729 * so we can't ourselves.
733 _cache_hold(struct namecache *ncp)
735 KKASSERT(ncp->nc_refs > 0);
736 atomic_add_int(&ncp->nc_refs, 1);
742 * Drop a cache entry.
744 * The 1->0 transition can only occur after or because the natural ref
745 * is being dropped. If another thread had a temporary ref during the
746 * ncp's destruction, then that other thread might wind up being the
747 * one to drop the last ref.
751 _cache_drop(struct namecache *ncp)
753 if (atomic_fetchadd_int(&ncp->nc_refs, -1) == 1) {
754 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
759 ncp->nc_refs = -1; /* safety */
761 kfree(ncp->nc_name, M_VFSCACHEAUX);
762 kfree_obj(ncp, M_VFSCACHE);
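/*
 * Editor's illustrative sketch (not part of the original source): a caller
 * whose ncp reference is deterministic (e.g. obtained from a file
 * descriptor's nchandle) may take and release a temporary ref:
 *
 *	_cache_hold(ncp);		nc_refs already known to be >= 1
 *	... temporary, unlocked use ...
 *	_cache_drop(ncp);		may end up being the final 1->0 drop
 */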
767 * Link a new namecache entry to its parent and to the hash table. Be
768 * careful to avoid races if vhold() blocks in the future.
770 * Both ncp and par must be referenced and locked. The reference is
771 * transferred to the nchpp (and, most notably, NOT to the parent list).
773 * NOTE: The hash table spinlock is held across this call, we can't do
777 _cache_link_parent(struct namecache *ncp, struct namecache *par,
778 struct nchash_head *nchpp)
780 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
782 KKASSERT(ncp->nc_parent == NULL);
783 _cache_ncp_gen_enter(ncp);
784 ncp->nc_parent = par;
785 ncp->nc_head = nchpp;
788 * Set inheritance flags. Note that the parent flags may be
789 * stale due to getattr potentially not having been run yet
790 * (it gets run during nlookup()'s).
792 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
793 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
794 ncp->nc_flag |= NCF_SF_PNOCACHE;
795 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
796 ncp->nc_flag |= NCF_UF_PCACHE;
799 * Add to hash table and parent, adjust accounting
801 TAILQ_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
802 atomic_add_long(&pn->vfscache_count, 1);
805 * ncp is a new leaf being added to the tree
807 if (TAILQ_EMPTY(&ncp->nc_list)) {
808 atomic_add_long(&pn->vfscache_leafs, 1);
809 if (ncp->nc_flag & NCF_UNRESOLVED)
810 atomic_add_long(&pn->vfscache_unres, 1);
813 if (TAILQ_EMPTY(&par->nc_list)) {
815 * Parent was, but now is no longer a leaf
818 * XXX for now don't mess with par's gen, it causes
819 * unnecessary nlookup retries (though not many)
821 /*_cache_ncp_gen_enter(par);*/
822 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
823 if (par->nc_parent) {
824 if (par->nc_flag & NCF_UNRESOLVED)
825 atomic_add_long(&pn->vfscache_unres, -1);
826 atomic_add_long(&pn->vfscache_leafs, -1);
830 * Any vp associated with an ncp which has children must
831 * be held to prevent it from being recycled.
835 /*_cache_ncp_gen_exit(par);*/
837 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
839 _cache_hold(par); /* add nc_parent ref */
840 _cache_ncp_gen_exit(ncp);
844 * Remove the parent and hash associations from a namecache structure.
845 * Drop the ref-count on the parent. The caller receives the ref
846 * from the ncp's nchpp linkage that was removed and may forward that
847 * ref to a new linkage.
849 * The caller usually holds an additional ref on the ncp so the unlink
850 * cannot be the final drop. XXX should not be necessary now since the
851 * caller receives the ref from the nchpp linkage, assuming the ncp
852 * was linked in the first place.
854 * ncp must be locked, which means that there won't be any nc_parent
855 * removal races. This routine will acquire a temporary lock on
856 * the parent as well as the appropriate hash chain.
858 * par must be locked and will remain locked on return.
860 * nhcpp must be spin-locked. This routine eats the spin-lock.
863 _cache_unlink_parent(struct namecache *par, struct namecache *ncp,
864 struct nchash_head *nchpp)
866 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
867 struct vnode *dropvp;
869 KKASSERT(ncp->nc_parent == par);
871 _cache_ncp_gen_enter(ncp);
873 /* don't add a ref, we drop the nchpp ref later */
876 * Remove from hash table and parent, adjust accounting
878 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash);
879 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
880 atomic_add_long(&pn->vfscache_count, -1);
883 * Removing leaf from tree
885 if (TAILQ_EMPTY(&ncp->nc_list)) {
886 if (ncp->nc_flag & NCF_UNRESOLVED)
887 atomic_add_long(&pn->vfscache_unres, -1);
888 atomic_add_long(&pn->vfscache_leafs, -1);
892 * Parent is now a leaf?
895 if (TAILQ_EMPTY(&par->nc_list)) {
897 * XXX for now don't mess with par's gen, it causes
898 * unnecessary nlookup retries (though not many)
900 /*_cache_ncp_gen_enter(par);*/
901 if (par->nc_parent) {
902 if (par->nc_flag & NCF_UNRESOLVED)
903 atomic_add_long(&pn->vfscache_unres, 1);
904 atomic_add_long(&pn->vfscache_leafs, 1);
908 /*_cache_ncp_gen_exit(par);*/
910 ncp->nc_parent = NULL;
912 spin_unlock(&nchpp->spin);
913 _cache_drop(par); /* drop ncp's nc_parent ref from (par) */
916 * We can only safely vdrop with no spinlocks held.
920 _cache_ncp_gen_exit(ncp);
924 * Allocate a new namecache structure. Most of the code does not require
925 * zero-termination of the string but it makes vop_compat_ncreate() easier.
927 * The returned ncp will be locked and referenced. The ref is generally meant
928 * to be transferred to the nchpp linkage.
930 static struct namecache *
931 cache_alloc(int nlen)
933 struct namecache *ncp;
935 ncp = kmalloc_obj(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
937 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHEAUX, M_WAITOK);
939 ncp->nc_flag = NCF_UNRESOLVED;
940 ncp->nc_error = ENOTCONN; /* needs to be resolved */
941 ncp->nc_refs = 1; /* natural ref */
942 ncp->nc_generation = 0; /* link/unlink/res/unres op */
943 TAILQ_INIT(&ncp->nc_list);
944 lockinit(&ncp->nc_lock, "ncplk", hz, LK_CANRECURSE);
945 lockmgr(&ncp->nc_lock, LK_EXCLUSIVE);
951 * Can only be called for the case where the ncp has never been
952 * associated with anything (so no spinlocks are needed).
955 _cache_free(struct namecache *ncp)
957 KKASSERT(ncp->nc_refs == 1);
959 kfree(ncp->nc_name, M_VFSCACHEAUX);
960 kfree_obj(ncp, M_VFSCACHE);
964 * [re]initialize a nchandle.
967 cache_zero(struct nchandle *nch)
974 * Ref and deref a nchandle structure (ncp + mp)
976 * The caller must specify a stable ncp pointer, typically meaning the
977 * ncp is already referenced but this can also occur indirectly through
978 * e.g. holding a lock on a direct child.
980 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
981 * use read spinlocks here.
984 cache_hold(struct nchandle *nch)
986 _cache_hold(nch->ncp);
987 _cache_mntref(nch->mount);
992 * Create a copy of a namecache handle for an already-referenced
996 cache_copy(struct nchandle *nch, struct nchandle *target)
998 struct namecache *ncp;
1000 struct mntcache_elm *elm;
1001 struct namecache *ncpr;
1009 elm = _cache_mntcache_hash(ncp);
1010 for (i = 0; i < MNTCACHE_SET; ++i) {
1011 if (elm->ncp == ncp) {
1012 ncpr = atomic_swap_ptr((void *)&elm->ncp, NULL);
1028 * Drop the nchandle, but try to cache the ref to avoid global atomic
1029 * ops. This is typically done on the system root and jail root nchandles.
1032 cache_drop_and_cache(struct nchandle *nch, int elmno)
1034 struct mntcache_elm *elm;
1035 struct mntcache_elm *best;
1036 struct namecache *ncpr;
1043 _cache_drop(nch->ncp);
1047 _cache_mntrel(nch->mount);
1053 elm = _cache_mntcache_hash(nch->ncp);
1055 for (i = 0; i < MNTCACHE_SET; ++i) {
1056 if (elm->ncp == NULL) {
1057 ncpr = atomic_swap_ptr((void *)&elm->ncp, nch->ncp);
1058 _cache_mntrel(nch->mount);
1066 delta1 = ticks - best->ticks;
1067 delta2 = ticks - elm->ticks;
1068 if (delta2 > delta1 || delta1 < -1 || delta2 < -1)
1072 ncpr = atomic_swap_ptr((void *)&best->ncp, nch->ncp);
1073 _cache_mntrel(nch->mount);
1074 best->ticks = ticks;
1082 cache_changemount(struct nchandle *nch, struct mount *mp)
1085 _cache_mntrel(nch->mount);
1090 cache_drop(struct nchandle *nch)
1092 _cache_mntrel(nch->mount);
1093 _cache_drop(nch->ncp);
1100 * -1 Locked by other
1102 * (v) LK_SHARED or LK_EXCLUSIVE
1105 cache_lockstatus(struct nchandle *nch)
1107 return(_cache_lockstatus(nch->ncp));
1111 cache_lock(struct nchandle *nch)
1113 _cache_lock(nch->ncp);
1117 * Returns a shared or exclusive-locked ncp. The ncp will only be
1118 * shared-locked if it is already resolved.
1121 cache_lock_maybe_shared(struct nchandle *nch, int excl)
1123 struct namecache *ncp = nch->ncp;
1125 if (ncp_shared_lock_disable || excl ||
1126 (ncp->nc_flag & NCF_UNRESOLVED)) {
1129 _cache_lock_shared(ncp);
1130 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1131 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
1143 * Lock fncpd, fncp, tncpd, and tncp. tncp is already locked but may
1144 * have to be cycled to avoid deadlocks. Make sure all four are resolved.
1146 * The caller is responsible for checking the validity upon return as
1147 * the records may have been flagged DESTROYED in the interim.
1149 * Namecache lock ordering is leaf first, then parent. However, complex
1150 * interactions may occur between the source and target because there is
1151 * no ordering guarantee between (fncpd, fncp) and (tncpd, tncp).
1154 cache_lock4_tondlocked(struct nchandle *fncpd, struct nchandle *fncp,
1155 struct nchandle *tncpd, struct nchandle *tncp,
1156 struct ucred *fcred, struct ucred *tcred)
1159 u_int dummy_gen = 0;
1162 * Lock tncp and tncpd
1164 * NOTE: Because these ncps are not locked to begin with, it is
1165 * possible for other rename races to cause the normal lock
1166 * order assumptions to fail.
1168 * NOTE: Lock ordering assumptions are valid if a leaf's parent
1169 * matches after the leaf has been locked. However, ordering
1170 * between the 'from' and the 'to' is not, and an overlapping
1171 * lock order reversal is still possible.
1174 if (__predict_false(tlocked == 0)) {
1177 if (__predict_false(cache_lock_nonblock(tncpd) != 0)) {
1179 cache_lock(tncpd); /* cycle tncpd lock */
1180 cache_unlock(tncpd);
1186 * Lock fncp and fncpd
1188 * NOTE: Because these ncps are not locked to begin with, it is
1189 * possible for other rename races to cause the normal lock
1190 * order assumptions to fail.
1192 * NOTE: Lock ordering assumptions are valid if a leaf's parent
1193 * matches after the leaf has been locked. However, ordering
1194 * between the 'from' and the 'to' is not, and an overlapping
1195 * lock order reversal is still possible.
1197 if (__predict_false(cache_lock_nonblock(fncp) != 0)) {
1198 cache_unlock(tncpd);
1200 cache_lock(fncp); /* cycle fncp lock */
1206 if (__predict_false(cache_lock_nonblock(fncpd) != 0)) {
1208 cache_unlock(tncpd);
1211 cache_unlock(fncpd); /* cycle fncpd lock */
1216 if (__predict_true((fncpd->ncp->nc_flag & NCF_DESTROYED) == 0))
1217 cache_resolve(fncpd, &dummy_gen, fcred);
1218 if (__predict_true((tncpd->ncp->nc_flag & NCF_DESTROYED) == 0))
1219 cache_resolve(tncpd, &dummy_gen, tcred);
1220 if (__predict_true((fncp->ncp->nc_flag & NCF_DESTROYED) == 0))
1221 cache_resolve(fncp, &dummy_gen, fcred);
1222 if (__predict_true((tncp->ncp->nc_flag & NCF_DESTROYED) == 0))
1223 cache_resolve(tncp, &dummy_gen, tcred);
1227 cache_lock_nonblock(struct nchandle *nch)
1229 return(_cache_lock_nonblock(nch->ncp));
1233 cache_unlock(struct nchandle *nch)
1235 _cache_unlock(nch->ncp);
1239 * ref-and-lock, unlock-and-deref functions.
1241 * This function is primarily used by nlookup. Even though cache_lock
1242 * holds the vnode, it is possible that the vnode may have already
1243 * initiated a recyclement.
1245 * We want cache_get() to return a definitively usable vnode or a
1246 * definitively unresolved ncp.
1250 _cache_get(struct namecache *ncp)
1254 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1255 _cache_setunresolved(ncp, 1);
1260 * Attempt to obtain a shared lock on the ncp. A shared lock will only
1261 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is
1262 * valid. Otherwise an exclusive lock will be acquired instead.
1266 _cache_get_maybe_shared(struct namecache *ncp, int excl)
1268 if (ncp_shared_lock_disable || excl ||
1269 (ncp->nc_flag & NCF_UNRESOLVED))
1271 return(_cache_get(ncp));
1274 _cache_lock_shared(ncp);
1275 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1276 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
1278 ncp = _cache_get(ncp);
1283 ncp = _cache_get(ncp);
1290 * NOTE: The same nchandle can be passed for both arguments.
1293 cache_get(struct nchandle *nch, struct nchandle *target)
1295 KKASSERT(nch->ncp->nc_refs > 0);
1296 target->mount = nch->mount;
1297 target->ncp = _cache_get(nch->ncp);
1298 _cache_mntref(target->mount);
1302 cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl)
1304 KKASSERT(nch->ncp->nc_refs > 0);
1305 target->mount = nch->mount;
1306 target->ncp = _cache_get_maybe_shared(nch->ncp, excl);
1307 _cache_mntref(target->mount);
1311 * Release a held and locked ncp
1315 _cache_put(struct namecache *ncp)
1322 cache_put(struct nchandle *nch)
1324 _cache_mntrel(nch->mount);
1325 _cache_put(nch->ncp);
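/*
 * Editor's illustrative sketch (not part of the original source): the
 * ref-and-lock pair is typically used to bracket work on an nchandle:
 *
 *	cache_get(&nch, &nch);		ref + lock (same handle is legal)
 *	... examine, resolve, or modify nch.ncp ...
 *	cache_put(&nch);		unlock + drop
 */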
1331 * Resolve an unresolved ncp by associating a vnode with it. If the
1332 * vnode is NULL, a negative cache entry is created.
1334 * The ncp should be locked on entry and will remain locked on return.
1338 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp,
1341 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
1343 KKASSERT((ncp->nc_flag & NCF_UNRESOLVED) &&
1344 (_cache_lockstatus(ncp) == LK_EXCLUSIVE) &&
1345 ncp->nc_vp == NULL);
1348 _cache_ncp_gen_enter(ncp);
1352 * Any vp associated with an ncp which has children must
1353 * be held. Any vp associated with a locked ncp must be held.
1355 if (!TAILQ_EMPTY(&ncp->nc_list))
1357 spin_lock(&vp->v_spin);
1359 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
1360 ++vp->v_namecache_count;
1361 _cache_hold(ncp); /* v_namecache assoc */
1362 spin_unlock(&vp->v_spin);
1363 vhold(vp); /* nc_vp */
1366 * Set auxiliary flags
1368 switch(vp->v_type) {
1370 ncp->nc_flag |= NCF_ISDIR;
1373 ncp->nc_flag |= NCF_ISSYMLINK;
1374 /* XXX cache the contents of the symlink */
1383 * XXX: this is a hack to work-around the lack of a real pfs vfs
1387 if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
1392 * When creating a negative cache hit we set the
1393 * namecache_gen. A later resolve will clean out the
1394 * negative cache hit if the mount point's namecache_gen
1395 * has changed. Used by devfs, could also be used by
1399 ncp->nc_negcpu = mycpu->gd_cpuid;
1400 spin_lock(&pn->neg_spin);
1401 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
1402 _cache_hold(ncp); /* neg_list assoc */
1404 spin_unlock(&pn->neg_spin);
1405 atomic_add_long(&pn->vfscache_negs, 1);
1407 ncp->nc_error = ENOENT;
1409 VFS_NCPGEN_SET(mp, ncp);
1413 * Previously unresolved leaf is now resolved.
1415 * Clear the NCF_UNRESOLVED flag last (see cache_nlookup_nonlocked()).
1416 * We only adjust vfscache_unres for ncp's that are in the tree.
1418 if (TAILQ_EMPTY(&ncp->nc_list) && ncp->nc_parent)
1419 atomic_add_long(&pn->vfscache_unres, -1);
1420 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
1422 _cache_ncp_gen_exit(ncp);
1426 cache_setvp(struct nchandle *nch, struct vnode *vp)
1428 _cache_setvp(nch->mount, nch->ncp, vp, 1);
1435 cache_settimeout(struct nchandle *nch, int nticks)
1437 struct namecache *ncp = nch->ncp;
1439 if ((ncp->nc_timeout = ticks + nticks) == 0)
1440 ncp->nc_timeout = 1;
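/*
 * Editor's note (illustrative, not part of the original source): a
 * nc_timeout of 0 means "no timeout", which is why the assignment above
 * bumps a wrapped value to 1.  A network filesystem could bound the life
 * of a resolved entry roughly as follows (the timeout tunable below is
 * hypothetical):
 *
 *	cache_setvp(&nch, vp);
 *	cache_settimeout(&nch, nfs_cache_timeout * hz);	hypothetical tunable
 */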
1444 * Disassociate the vnode or negative-cache association and mark a
1445 * namecache entry as unresolved again. Note that the ncp is still
1446 * left in the hash table and still linked to its parent.
1448 * The ncp should be locked and refd on entry and will remain locked and refd
1451 * This routine is normally never called on a directory containing children.
1452 * However, NFS often does just that in its rename() code as a cop-out to
1453 * avoid complex namespace operations. This disconnects a directory vnode
1454 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
1460 _cache_setunresolved(struct namecache *ncp, int adjgen)
1464 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1465 struct pcpu_ncache *pn;
1468 _cache_ncp_gen_enter(ncp);
1471 * Is a resolved or destroyed leaf now becoming unresolved?
1472 * Only adjust vfscache_unres for linked ncp's.
1474 if (TAILQ_EMPTY(&ncp->nc_list) && ncp->nc_parent) {
1475 pn = &pcpu_ncache[mycpu->gd_cpuid];
1476 atomic_add_long(&pn->vfscache_unres, 1);
1479 ncp->nc_flag |= NCF_UNRESOLVED;
1480 ncp->nc_timeout = 0;
1481 ncp->nc_error = ENOTCONN;
1482 if ((vp = ncp->nc_vp) != NULL) {
1483 spin_lock(&vp->v_spin);
1485 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
1486 --vp->v_namecache_count;
1487 spin_unlock(&vp->v_spin);
1490 * Any vp associated with an ncp with children is
1491 * held by that ncp. Any vp associated with ncp
1492 * is held by that ncp. These conditions must be
1493 * undone when the vp is cleared out from the ncp.
1495 if (!TAILQ_EMPTY(&ncp->nc_list))
1499 pn = &pcpu_ncache[ncp->nc_negcpu];
1501 atomic_add_long(&pn->vfscache_negs, -1);
1502 spin_lock(&pn->neg_spin);
1503 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
1505 spin_unlock(&pn->neg_spin);
1507 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
1510 _cache_ncp_gen_exit(ncp);
1511 _cache_drop(ncp); /* from v_namecache or neg_list */
1516 * The cache_nresolve() code calls this function to automatically
1517 * set a resolved cache element to unresolved if it has timed out
1518 * or if it is a negative cache hit and the mount point namecache_gen
1522 _cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp)
1525 * Try to zap entries that have timed out. We have
1526 * to be careful here because locked leafs may depend
1527 * on the vnode remaining intact in a parent, so only
1528 * do this under very specific conditions.
1530 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
1531 TAILQ_EMPTY(&ncp->nc_list)) {
1536 * If a resolved negative cache hit is invalid due to
1537 * the mount's namecache generation being bumped, zap it.
1539 if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
1544 * Otherwise we are good
1549 static __inline void
1550 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
1553 * Already in an unresolved state, nothing to do.
1555 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1556 if (_cache_auto_unresolve_test(mp, ncp))
1557 _cache_setunresolved(ncp, 1);
1562 cache_setunresolved(struct nchandle *nch)
1564 _cache_setunresolved(nch->ncp, 1);
1568 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
1569 * looking for matches. This flag tells the lookup code when it must
1570 * check for a mount linkage and also prevents the directories in question
1571 * from being deleted or renamed.
1575 cache_clrmountpt_callback(struct mount *mp, void *data)
1577 struct nchandle *nch = data;
1579 if (mp->mnt_ncmounton.ncp == nch->ncp)
1581 if (mp->mnt_ncmountpt.ncp == nch->ncp)
1587 * Clear NCF_ISMOUNTPT on nch->ncp if it is no longer associated
1588 * with a mount point.
1591 cache_clrmountpt(struct nchandle *nch)
1595 count = mountlist_scan(cache_clrmountpt_callback, nch,
1596 MNTSCAN_FORWARD | MNTSCAN_NOBUSY |
1599 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1603 * Invalidate portions of the namecache topology given a starting entry.
1604 * The passed ncp is set to an unresolved state and:
1606 * The passed ncp must be referenced and locked. The routine may unlock
1607 * and relock ncp several times, and will recheck the children and loop
1608 * to catch races. When done the passed ncp will be returned with the
1609 * reference and lock intact.
1611 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
1612 * that the physical underlying nodes have been
1613 * destroyed... as in deleted. For example, when
1614 * a directory is removed. This will cause record
1615 * lookups on the name to no longer be able to find
1616 * the record and tells the resolver to return failure
1617 * rather than trying to resolve through the parent.
1619 * The topology itself, including ncp->nc_name,
1622 * This only applies to the passed ncp, if CINV_CHILDREN
1623 * is specified the children are not flagged.
1625 * CINV_CHILDREN - Set all children (recursively) to an unresolved
1628 * Note that this will also have the side effect of
1629 * cleaning out any unreferenced nodes in the topology
1630 * from the leaves up as the recursion backs out.
1632 * Note that the topology for any referenced nodes remains intact, but
1633 * the nodes will be marked as having been destroyed and will be set
1634 * to an unresolved state.
1636 * It is possible for cache_inval() to race a cache_resolve(), meaning that
1637 * the namecache entry may not actually be invalidated on return if it was
1638 * revalidated while recursing down into its children. This code guarantees
1639 * that the node(s) will go through an invalidation cycle, but does not
1640 * guarantee that they will remain in an invalidated state.
1642 * Returns non-zero if a revalidation was detected during the invalidation
1643 * recursion, zero otherwise. Note that since only the original ncp is
1644 * locked the revalidation ultimately can only indicate that the original ncp
1645 * *MIGHT* now have been reresolved.
1647 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1648 * have to avoid blowing out the kernel stack. We do this by saving the
1649 * deep namecache node and aborting the recursion, then re-recursing at that
1650 * node using a depth-first algorithm in order to allow multiple deep
1651 * recursions to chain through each other, then we restart the invalidation
1656 struct namecache *resume_ncp;
1660 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
1664 _cache_inval(struct namecache *ncp, int flags)
1666 struct cinvtrack track;
1667 struct namecache *ncp2;
1671 track.resume_ncp = NULL;
1674 r = _cache_inval_internal(ncp, flags, &track);
1675 if (track.resume_ncp == NULL)
1678 while ((ncp2 = track.resume_ncp) != NULL) {
1679 track.resume_ncp = NULL;
1681 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
1683 /*_cache_put(ncp2);*/
1692 cache_inval(struct nchandle *nch, int flags)
1694 return(_cache_inval(nch->ncp, flags));
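/*
 * Editor's illustrative sketch (not part of the original source): a VFS
 * that just removed a directory might invalidate the related topology,
 * assuming nch is the referenced and locked nchandle of the removed
 * directory:
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 *	cache_put(&nch);
 */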
1698 * Helper for _cache_inval(). The passed ncp is refd and locked and
1699 * remains that way on return, but may be unlocked/relocked multiple
1700 * times by the routine.
1703 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
1705 struct namecache *nextkid;
1708 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
1710 _cache_ncp_gen_enter(ncp);
1711 _cache_setunresolved(ncp, 0);
1712 if (flags & CINV_DESTROY) {
1713 ncp->nc_flag |= NCF_DESTROYED;
1717 while ((flags & CINV_CHILDREN) &&
1718 (nextkid = TAILQ_FIRST(&ncp->nc_list)) != NULL
1720 struct namecache *kid;
1724 _cache_hold(nextkid);
1725 if (++track->depth > MAX_RECURSION_DEPTH) {
1726 track->resume_ncp = ncp;
1730 while ((kid = nextkid) != NULL) {
1732 * Parent (ncp) must be locked for the iteration.
1735 if (kid->nc_parent != ncp) {
1737 kprintf("cache_inval_internal restartA %s\n",
1742 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
1743 _cache_hold(nextkid);
1746 * Parent unlocked for this section to avoid
1747 * deadlocks. Then lock the kid and check for
1751 if (track->resume_ncp) {
1757 if (kid->nc_parent != ncp) {
1758 kprintf("cache_inval_internal "
1767 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
1768 TAILQ_FIRST(&kid->nc_list)
1771 rcnt += _cache_inval_internal(kid,
1772 flags & ~CINV_DESTROY, track);
1773 /*_cache_unlock(kid);*/
1774 /*_cache_drop(kid);*/
1781 * Relock parent to continue scan
1786 _cache_drop(nextkid);
1793 * Someone could have gotten in there while ncp was unlocked,
1796 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1798 _cache_ncp_gen_exit(ncp);
1804 * Invalidate a vnode's namecache associations. To avoid races against
1805 * the resolver we do not invalidate a node which we previously invalidated
1806 * but which was then re-resolved while we were in the invalidation loop.
1808 * Returns non-zero if any namecache entries remain after the invalidation
1811 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1812 * be ripped out of the topology while held, the vnode's v_namecache
1813 * list has no such restriction. NCP's can be ripped out of the list
1814 * at virtually any time if not locked, even if held.
1816 * In addition, the v_namecache list itself must be locked via
1817 * the vnode's spinlock.
1820 cache_inval_vp(struct vnode *vp, int flags)
1822 struct namecache *ncp;
1823 struct namecache *next;
1826 spin_lock(&vp->v_spin);
1827 ncp = TAILQ_FIRST(&vp->v_namecache);
1831 /* loop entered with ncp held and vp spin-locked */
1832 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1834 spin_unlock(&vp->v_spin);
1836 if (ncp->nc_vp != vp) {
1837 kprintf("Warning: cache_inval_vp: race-A detected on "
1838 "%s\n", ncp->nc_name);
1844 _cache_inval(ncp, flags);
1845 _cache_put(ncp); /* also releases reference */
1847 spin_lock(&vp->v_spin);
1848 if (ncp && ncp->nc_vp != vp) {
1849 spin_unlock(&vp->v_spin);
1850 kprintf("Warning: cache_inval_vp: race-B detected on "
1851 "%s\n", ncp->nc_name);
1856 spin_unlock(&vp->v_spin);
1857 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1861 * This routine is used instead of the normal cache_inval_vp() when we
1862 * are trying to recycle otherwise good vnodes.
1864 * Return 0 on success, non-zero if not all namecache records could be
1865 * disassociated from the vnode (for various reasons).
1868 cache_inval_vp_nonblock(struct vnode *vp)
1870 struct namecache *ncp;
1871 struct namecache *next;
1873 spin_lock(&vp->v_spin);
1875 ncp = TAILQ_FIRST(&vp->v_namecache);
1880 /* loop entered with ncp held */
1881 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1883 spin_unlock(&vp->v_spin);
1884 if (_cache_lock_nonblock(ncp)) {
1890 if (ncp->nc_vp != vp) {
1891 kprintf("Warning: cache_inval_vp: race-A detected on "
1892 "%s\n", ncp->nc_name);
1898 _cache_inval(ncp, 0);
1899 _cache_put(ncp); /* also releases reference */
1901 spin_lock(&vp->v_spin);
1902 if (ncp && ncp->nc_vp != vp) {
1903 spin_unlock(&vp->v_spin);
1904 kprintf("Warning: cache_inval_vp: race-B detected on "
1905 "%s\n", ncp->nc_name);
1910 spin_unlock(&vp->v_spin);
1912 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1916 * Attempt to quickly invalidate the vnode's namecache entry. This function
1917 * will also dive the ncp and free its children but only if they are trivial.
1918 * All locks are non-blocking and the function will fail if required locks
1919 * cannot be obtained.
1921 * We want this sort of function to be able to guarantee progress when vnlru
1922 * wants to recycle a vnode. Directories could otherwise get stuck and not
1923 * be able to recycle due to destroyed or unresolved children in the
1927 cache_inval_vp_quick(struct vnode *vp)
1929 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
1930 struct namecache *ncp;
1931 struct namecache *kid;
1933 spin_lock(&vp->v_spin);
1934 while ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
1936 spin_unlock(&vp->v_spin);
1937 if (_cache_lock_nonblock(ncp)) {
1943 * Try to trivially destroy any children.
1945 while ((kid = TAILQ_FIRST(&ncp->nc_list)) != NULL) {
1946 struct nchash_head *nchpp;
1949 * Early test without the lock. Give up if the
1950 * child has children of its own, the child is
1951 * positively-resolved, or the ref-count is
1954 if (TAILQ_FIRST(&kid->nc_list) ||
1956 kid->nc_refs != ncpbaserefs(kid))
1963 if (_cache_lock_nonblock(kid)) {
1970 * A destruction/free test requires the parent,
1971 * the kid, and the hash table to be locked. Note
1972 * that the kid may still be on the negative cache
1975 nchpp = kid->nc_head;
1976 spin_lock(&nchpp->spin);
1979 * Give up if the child isn't trivial. It can be
1980 * resolved or unresolved but must not have a vp.
1982 if (kid->nc_parent != ncp ||
1984 TAILQ_FIRST(&kid->nc_list) ||
1985 kid->nc_refs != 1 + ncpbaserefs(kid))
1987 spin_unlock(&nchpp->spin);
1993 ++pn->inv_kid_quick_count;
1996 * We can safely destroy the kid. It may still
1997 * have extra refs due to ncneglist races, but since
1998 * we checked above with the lock held those races
1999 * will self-resolve.
2001 * With these actions the kid should nominally
2002 * have just its natural ref plus our ref.
2004 * This is only safe because we hold locks on
2005 * the parent, the kid, and the nchpp. The only
2006 * lock we don't have is on the ncneglist and that
2007 * can race a ref, but as long as we unresolve the
2008 * kid before executing our final drop the ncneglist
2009 * code path(s) will just drop their own ref so all
2012 _cache_unlink_parent(ncp, kid, nchpp);
2013 _cache_setunresolved(kid, 1);
2014 if (kid->nc_refs != 2) {
2015 kprintf("Warning: kid %p unexpected refs=%d "
2018 kid->nc_flag, kid->nc_name);
2020 _cache_put(kid); /* drop our ref and lock */
2021 _cache_drop(kid); /* drop natural ref to destroy */
2025 * Now check ncp itself against our expectations. With
2026 * no children left we have our ref plus whether it is
2027 * resolved or not (which it has to be, actually, since it
2028 * is hanging off the vp->v_namecache).
2030 if (ncp->nc_refs != 1 + ncpbaserefs(ncp)) {
2032 spin_lock(&vp->v_spin);
2036 ++pn->inv_ncp_quick_count;
2039 * Success, disassociate and release the ncp. Do not
2040 * try to zap it here.
2042 * NOTE: Releasing the ncp here leaves it in the tree,
2043 * but since we have disassociated the vnode this
2044 * ncp entry becomes 'trivial' and successive calls
2045 * to cache_inval_vp_quick() will be able to continue
2048 _cache_setunresolved(ncp, 1);
2050 spin_lock(&vp->v_spin);
2052 spin_unlock(&vp->v_spin);
2056 * Clears the universal directory search 'ok' flag. This flag allows
2057 * nlookup() to bypass normal vnode checks. This flag is a cached flag
2058 * so clearing it simply forces revalidation.
2061 cache_inval_wxok(struct vnode *vp)
2063 struct namecache *ncp;
2065 spin_lock(&vp->v_spin);
2066 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
2067 if (ncp->nc_flag & (NCF_WXOK | NCF_NOTX))
2068 atomic_clear_short(&ncp->nc_flag, NCF_WXOK | NCF_NOTX);
2070 spin_unlock(&vp->v_spin);
2074 * The source ncp has been renamed to the target ncp. All elements have been
2075 * locked, including the parent ncp's.
2077 * The target ncp is destroyed (as a normal rename-over would destroy the
2078 * target file or directory).
2080 * Because there may be references to the source ncp we cannot copy its
2081 * contents to the target. Instead the source ncp is relinked as the target
2082 * and the target ncp is removed from the namecache topology.
2085 cache_rename(struct nchandle *fnch, struct nchandle *tnch)
2087 struct namecache *fncp = fnch->ncp;
2088 struct namecache *tncp = tnch->ncp;
2089 struct namecache *par;
2090 struct nchash_head *nchpp;
2095 if (tncp->nc_nlen) {
2096 nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHEAUX, M_WAITOK);
2097 bcopy(tncp->nc_name, nname, tncp->nc_nlen);
2098 nname[tncp->nc_nlen] = 0;
2104 * Rename fncp (unlink)
2106 if (fncp->nc_parent) {
2107 par = fncp->nc_parent;
2110 nchpp = fncp->nc_head;
2111 spin_lock(&nchpp->spin);
2112 _cache_unlink_parent(par, fncp, nchpp); /* eats nchpp */
2118 oname = fncp->nc_name;
2119 fncp->nc_name = nname;
2120 fncp->nc_nlen = tncp->nc_nlen;
2122 kfree(oname, M_VFSCACHEAUX);
2124 par = tncp->nc_parent;
2125 KKASSERT(par->nc_lock.lk_lockholder == curthread);
2128 * Rename fncp (relink)
2130 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
2131 hash = fnv_32_buf(&par, sizeof(par), hash);
2132 nchpp = NCHHASH(hash);
2134 spin_lock(&nchpp->spin);
2135 _cache_link_parent(fncp, par, nchpp);
2136 spin_unlock(&nchpp->spin);
2139 * Get rid of the overwritten tncp (unlink)
2141 _cache_unlink(tncp);
2145 * Perform actions consistent with unlinking a file. The passed-in ncp
2148 * The ncp is marked DESTROYED so it no longer shows up in searches,
2149 * and will be physically deleted when the vnode goes away.
2151 * If the related vnode has no refs then we cycle it through vget()/vput()
2152 * to (possibly if we don't have a ref race) trigger a deactivation,
2153 * allowing the VFS to trivially detect and recycle the deleted vnode
2154 * via VOP_INACTIVE().
2156 * NOTE: _cache_rename() will automatically call _cache_unlink() on the
2160 cache_unlink(struct nchandle *nch)
2162 _cache_unlink(nch->ncp);
2166 _cache_unlink(struct namecache *ncp)
2171 * Causes lookups to fail and allows another ncp with the same
2172 * name to be created under ncp->nc_parent.
2174 _cache_ncp_gen_enter(ncp);
2175 ncp->nc_flag |= NCF_DESTROYED;
2178 * Attempt to trigger a deactivation. Set VREF_FINALIZE to
2179 * force action on the 1->0 transition. Do not destroy the
2180 * vp association if a vp is present (leave the destroyed ncp
2181 * resolved through the vp finalization).
2183 * Cleanup the refs in the resolved-not-found case by setting
2184 * the ncp to an unresolved state. This improves our ability
2185 * to get rid of dead ncp elements in other cache_*() routines.
2187 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2190 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
2191 if (VREFCNT(vp) <= 0) {
2192 if (vget(vp, LK_SHARED) == 0)
2196 _cache_setunresolved(ncp, 0);
2199 _cache_ncp_gen_exit(ncp);
2203 * Return non-zero if the nch might be associated with an open and/or mmap()'d
2204 * file. The easy solution is to just return non-zero if the vnode has refs.
2205 * Used to interlock hammer2 reclaims (VREF_FINALIZE should already be set to
2206 * force the reclaim).
2209 cache_isopen(struct nchandle *nch)
2212 struct namecache *ncp = nch->ncp;
2214 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
2215 (vp = ncp->nc_vp) != NULL &&
2224 * vget the vnode associated with the namecache entry. Resolve the namecache
2225 * entry if necessary. The passed ncp must be referenced and locked. If
2226 * the ncp is resolved it might be locked shared.
2228 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
2229 * (depending on the passed lk_type) will be returned in *vpp with an error
2230 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
2231 * most typical error is ENOENT, meaning that the ncp represents a negative
2232 * cache hit and there is no vnode to retrieve, but other errors can occur
2235 * The vget() can race a reclaim. If this occurs we re-resolve the
2238 * There are numerous places in the kernel where vget() is called on a
2239 * vnode while one or more of its namecache entries is locked. Releasing
2240 * a vnode never deadlocks against locked namecache entries (the vnode
2241 * will not get recycled while referenced ncp's exist). This means we
2242 * can safely acquire the vnode. In fact, we MUST NOT release the ncp
2243 * lock when acquiring the vp lock or we might cause a deadlock.
2245 * NOTE: The passed-in ncp must be locked exclusively if it is initially
2246 * unresolved. If a reclaim race occurs the passed-in ncp will be
2247 * relocked exclusively before being re-resolved.
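/*
 * Editor's illustrative sketch (not part of the original source): typical
 * use, assuming nch is referenced and locked and cred is the caller's
 * credential:
 *
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		... use the referenced, shared-locked vp ...
 *		vput(vp);
 *	}
 *	ENOENT indicates a negative hit (no vnode to return)
 */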
2250 cache_vget(struct nchandle *nch, struct ucred *cred,
2251 int lk_type, struct vnode **vpp)
2253 struct namecache *ncp;
2256 u_int dummy_gen = 0;
2261 if (ncp->nc_flag & NCF_UNRESOLVED)
2262 error = cache_resolve(nch, &dummy_gen, cred);
2266 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
2267 error = vget(vp, lk_type);
2272 * The ncp may have been locked shared, we must relock
2273 * it exclusively before we can set it to unresolved.
2275 if (error == ENOENT) {
2276 kprintf("Warning: vnode reclaim race detected "
2277 "in cache_vget on %p (%s)\n",
2281 _cache_setunresolved(ncp, 1);
2286 * Not a reclaim race, some other error.
2288 KKASSERT(ncp->nc_vp == vp);
2291 KKASSERT(ncp->nc_vp == vp);
2292 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
2295 if (error == 0 && vp == NULL)
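/*
 * Illustrative sketch only (not part of the build): a typical consumer
 * pattern pairing cache_nlookup() with cache_vget().  The helper name,
 * argument plumbing and error handling are assumptions for illustration.
 */
#if 0
static int
example_lookup_and_vget(struct nchandle *par_nch, struct nlcomponent *nlc,
			struct ucred *cred, struct vnode **vpp)
{
	struct nchandle nch;
	int error;

	nch = cache_nlookup(par_nch, nlc);	/* ref'd + exclusively locked */
	error = cache_vget(&nch, cred, LK_SHARED, vpp); /* resolves if needed */
	cache_put(&nch);			/* unlock + drop the ncp */
	return (error);	/* on success *vpp is ref'd + locked, vput() later */
}
#endif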
2302 * Similar to cache_vget() but only acquires a ref on the vnode. The vnode
2303 * is already held by virtue of the ncp being locked, but it might not be
2304 * referenced and while it is not referenced it can transition into the
2307 * NOTE: The passed-in ncp must be locked exclusively if it is initially
2308 * unresolved. If a reclaim race occurs the passed-in ncp will be
2309 * relocked exclusively before being re-resolved.
2311 * NOTE: At the moment we have to issue a vget() on the vnode, even though
2312 * we are going to immediately release the lock, in order to resolve
2313 * potential reclamation races. Once we have a solid vnode ref that
2314 * was (at some point) interlocked via a vget(), the vnode will not
2317 * NOTE: vhold counts (v_auxrefs) do not prevent reclamation.
2320 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
2322 struct namecache *ncp;
2326 u_int dummy_gen = 0;
2331 if (ncp->nc_flag & NCF_UNRESOLVED)
2332 error = cache_resolve(nch, &dummy_gen, cred);
2336 while (error == 0 && (vp = ncp->nc_vp) != NULL) {
2338 * Try a lockless ref of the vnode. VRECLAIMED transitions
2339 * use the vx_lock state and update-counter mechanism so we
2340 * can detect if one is in-progress or occurred.
2342 * If we can successfully ref the vnode and interlock against
2343 * the update-counter mechanism, and VRECLAIMED is found to
2344 * not be set after that, we should be good.
2346 v = spin_access_start_only(&vp->v_spin);
2347 if (__predict_true(spin_access_check_inprog(v) == 0)) {
2349 if (__predict_false(
2350 spin_access_end_only(&vp->v_spin, v))) {
2354 if (__predict_true((vp->v_flag & VRECLAIMED) == 0)) {
2358 kprintf("CACHE_VREF: IN-RECLAIM\n");
2362 * Do it the slow way
2364 error = vget(vp, LK_SHARED);
2369 if (error == ENOENT) {
2370 kprintf("Warning: vnode reclaim race detected "
2371 "in cache_vget on %p (%s)\n",
2375 _cache_setunresolved(ncp, 1);
2380 * Not a reclaim race, some other error.
2382 KKASSERT(ncp->nc_vp == vp);
2385 KKASSERT(ncp->nc_vp == vp);
2386 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
2387 /* caller does not want a lock */
2392 if (error == 0 && vp == NULL)
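/*
 * Illustrative sketch only (not part of the build): cache_vref() vs
 * cache_vget() from the caller's point of view.  cache_vref() hands back
 * a referenced but unlocked vnode, so it pairs with vrele() rather than
 * vput().  The surrounding variables are assumptions for illustration.
 */
#if 0
	struct vnode *vp;

	error = cache_vref(&nch, cred, &vp);	/* ref only, no vnode lock */
	if (error == 0) {
		/* ... use vp without holding its lock ... */
		vrele(vp);			/* drop the ref when done */
	}
#endif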
2400 * Return a referenced vnode representing the parent directory of
2403 * Because the caller has locked the ncp it should not be possible for
2404 * the parent ncp to go away. However, the parent can unresolve its
2405 * dvp at any time so we must be able to acquire a lock on the parent
2406 * to safely access nc_vp.
2408 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
2409 * so use vhold()/vdrop() while holding the lock to prevent dvp from
2410 * getting destroyed.
2412 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a
2413 * lock on the ncp in question.
2416 cache_dvpref(struct namecache *ncp)
2418 struct namecache *par;
2422 if ((par = ncp->nc_parent) != NULL) {
2425 if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
2426 if ((dvp = par->nc_vp) != NULL)
2431 if (vget(dvp, LK_SHARED) == 0) {
2434 /* return refd, unlocked dvp */
2446 * Convert a directory vnode to a namecache record without any other
2447 * knowledge of the topology. This ONLY works with directory vnodes and
2448 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
2449 * returned ncp (if not NULL) will be held and unlocked.
2451 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
2452 * If 'makeit' is 1 we attempt to track down and create the namecache topology
2453 * for dvp. This will fail only if the directory has been deleted out from
2456 * Callers must always check for a NULL return no matter the value of 'makeit'.
2458 * To avoid running out of kernel stack, each recursive call increments
2459 * the makeit variable.
2462 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2463 struct vnode *dvp, char *fakename);
2464 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
2465 struct vnode **saved_dvp);
2468 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
2469 struct nchandle *nch)
2471 struct vnode *saved_dvp;
2477 nch->mount = dvp->v_mount;
2482 * Handle the makeit == 0 degenerate case
2485 spin_lock_shared(&dvp->v_spin);
2486 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2489 spin_unlock_shared(&dvp->v_spin);
2493 * Loop until resolution; the code inside will break out on error.
2497 * Break out if we successfully acquire a working ncp.
2499 spin_lock_shared(&dvp->v_spin);
2500 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2503 spin_unlock_shared(&dvp->v_spin);
2506 spin_unlock_shared(&dvp->v_spin);
2509 * If dvp is the root of its filesystem it should already
2510 * have a namecache pointer associated with it as a side
2511 * effect of the mount, but it may have been disassociated.
2513 if (dvp->v_flag & VROOT) {
2514 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
2515 error = cache_resolve_mp(nch->mount, 1);
2516 _cache_put(nch->ncp);
2517 if (ncvp_debug & 1) {
2518 kprintf("cache_fromdvp: resolve root of "
2519 "mount %p error %d",
2520 dvp->v_mount, error);
2524 kprintf(" failed\n");
2529 kprintf(" succeeded\n");
2534 * If we have recursed too deeply, resort to an O(n^2)
2535 * algorithm to resolve the namecache topology. The
2536 * resolved pvp is left referenced in saved_dvp to
2537 * prevent the tree from being destroyed while we loop.
2540 error = cache_fromdvp_try(dvp, cred, &saved_dvp);
2542 kprintf("lookupdotdot(longpath) failed %d "
2543 "dvp %p\n", error, dvp);
2551 * Get the parent directory and resolve its ncp.
2554 kfree(fakename, M_TEMP);
2557 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2560 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
2566 * Reuse makeit as a recursion depth counter. On success
2567 * nch will be fully referenced.
2569 cache_fromdvp(pvp, cred, makeit + 1, nch);
2571 if (nch->ncp == NULL)
2575 * Do an inefficient scan of pvp (embodied by ncp) to look
2576 * for dvp. This will create a namecache record for dvp on
2577 * success. We loop up to recheck on success.
2579 * ncp and dvp are both held but not locked.
2581 error = cache_inefficient_scan(nch, cred, dvp, fakename);
2583 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
2584 pvp, nch->ncp->nc_name, dvp);
2586 /* nch was NULLed out, reload mount */
2587 nch->mount = dvp->v_mount;
2590 if (ncvp_debug & 1) {
2591 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
2592 pvp, nch->ncp->nc_name);
2595 /* nch was NULLed out, reload mount */
2596 nch->mount = dvp->v_mount;
2600 * If nch->ncp is non-NULL it will have been held already.
2603 kfree(fakename, M_TEMP);
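/*
 * Illustrative sketch only (not part of the build): how an NFS-style
 * caller holding a ref'd, unlocked directory vnode might use
 * cache_fromdvp().  Error handling is abbreviated and assumed.
 */
#if 0
	struct nchandle nch;

	cache_fromdvp(dvp, cred, 1, &nch);	/* makeit=1: rebuild topology */
	if (nch.ncp == NULL)
		return (ENOENT);	/* directory deleted out from under us */
	/* ... nch.ncp is held but not locked ... */
	cache_drop(&nch);
#endif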
2612 * Go up the chain of parent directories until we find something
2613 * we can resolve into the namecache. This is very inefficient.
2617 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
2618 struct vnode **saved_dvp)
2620 struct nchandle nch;
2623 static time_t last_fromdvp_report;
2627 * Loop getting the parent directory vnode until we get something we
2628 * can resolve in the namecache.
2631 nch.mount = dvp->v_mount;
2637 kfree(fakename, M_TEMP);
2640 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2647 spin_lock_shared(&pvp->v_spin);
2648 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
2649 _cache_hold(nch.ncp);
2650 spin_unlock_shared(&pvp->v_spin);
2654 spin_unlock_shared(&pvp->v_spin);
2655 if (pvp->v_flag & VROOT) {
2656 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
2657 error = cache_resolve_mp(nch.mount, 1);
2658 _cache_unlock(nch.ncp);
2661 _cache_drop(nch.ncp);
2671 if (last_fromdvp_report != time_uptime) {
2672 last_fromdvp_report = time_uptime;
2673 kprintf("Warning: extremely inefficient path "
2674 "resolution on %s\n",
2677 error = cache_inefficient_scan(&nch, cred, dvp, fakename);
2680 * Hopefully dvp now has a namecache record associated with
2681 * it. Leave it referenced to prevent the kernel from
2682 * recycling the vnode. Otherwise extremely long directory
2683 * paths could result in endless recycling.
2688 _cache_drop(nch.ncp);
2691 kfree(fakename, M_TEMP);
2696 * Do an inefficient scan of the directory represented by ncp looking for
2697 * the directory vnode dvp. ncp must be held but not locked on entry and
2698 * will be held on return. dvp must be refd but not locked on entry and
2699 * will remain refd on return.
2701 * Why do this at all? Well, due to its stateless nature the NFS server
2702 * converts file handles directly to vnodes without necessarily going through
2703 * the namecache ops that would otherwise create the namecache topology
2704 * leading to the vnode. We could either (1) Change the namecache algorithms
2705 * to allow disconnected namecache records that are re-merged opportunistically,
2706 * or (2) Make the NFS server backtrack and scan to recover a connected
2707 * namecache topology in order to then be able to issue new API lookups.
2709 * It turns out that (1) is a huge mess. It takes a nice clean set of
2710 * namecache algorithms and introduces a lot of complication in every subsystem
2711 * that calls into the namecache to deal with the re-merge case, especially
2712 * since we are using the namecache to placehold negative lookups and the
2713 * vnode might not be immediately assigned. (2) is certainly far less
2714 * efficient than (1), but since we are only talking about directories here
2715 * (which are likely to remain cached), the case does not actually run all
2716 * that often and has the supreme advantage of not polluting the namecache
2719 * If a fakename is supplied just construct a namecache entry using the
2723 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2724 struct vnode *dvp, char *fakename)
2726 struct nlcomponent nlc;
2727 struct nchandle rncp;
2739 vat.va_blocksize = 0;
2740 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
2743 error = cache_vref(nch, cred, &pvp);
2747 if (ncvp_debug & 1) {
2748 kprintf("inefficient_scan of (%p,%s): directory iosize %ld "
2749 "vattr fileid = %lld\n",
2750 nch->ncp, nch->ncp->nc_name,
2752 (long long)vat.va_fileid);
2756 * Use the supplied fakename if not NULL. Fake names are typically
2757 * not in the actual filesystem hierarchy. This is used by HAMMER
2758 * to glue @@timestamp recursions together.
2761 nlc.nlc_nameptr = fakename;
2762 nlc.nlc_namelen = strlen(fakename);
2763 rncp = cache_nlookup(nch, &nlc);
2767 if ((blksize = vat.va_blocksize) == 0)
2768 blksize = DEV_BSIZE;
2769 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
2775 iov.iov_base = rbuf;
2776 iov.iov_len = blksize;
2779 uio.uio_resid = blksize;
2780 uio.uio_segflg = UIO_SYSSPACE;
2781 uio.uio_rw = UIO_READ;
2782 uio.uio_td = curthread;
2785 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
2786 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
2788 den = (struct dirent *)rbuf;
2789 bytes = blksize - uio.uio_resid;
2792 if (ncvp_debug & 2) {
2793 kprintf("cache_inefficient_scan: %*.*s\n",
2794 den->d_namlen, den->d_namlen,
2797 if (den->d_type != DT_WHT &&
2798 den->d_ino == vat.va_fileid) {
2799 if (ncvp_debug & 1) {
2800 kprintf("cache_inefficient_scan: "
2801 "MATCHED inode %lld path %s/%*.*s\n",
2802 (long long)vat.va_fileid,
2804 den->d_namlen, den->d_namlen,
2807 nlc.nlc_nameptr = den->d_name;
2808 nlc.nlc_namelen = den->d_namlen;
2809 rncp = cache_nlookup(nch, &nlc);
2810 KKASSERT(rncp.ncp != NULL);
2813 bytes -= _DIRENT_DIRSIZ(den);
2814 den = _DIRENT_NEXT(den);
2816 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
2819 kfree(rbuf, M_TEMP);
2823 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
2824 _cache_setvp(rncp.mount, rncp.ncp, dvp, 1);
2825 if (ncvp_debug & 2) {
2826 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
2827 nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
2830 if (ncvp_debug & 2) {
2831 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
2832 nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
2836 if (rncp.ncp->nc_vp == NULL)
2837 error = rncp.ncp->nc_error;
2839 * Release rncp after a successful nlookup. rncp was fully
2844 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
2845 dvp, nch->ncp->nc_name);
2852 * This function must be called with the ncp held and locked and will unlock
2853 * and drop it during zapping.
2855 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
2856 * state, which disassociates it from its vnode or pcpu_ncache[n].neg_list
2857 * and removes the related reference. If the ncp can be removed, and the
2858 * parent can be zapped non-blocking, this function loops up.
2860 * There will be one ref from the caller (which we now own). The only
2861 * remaining autonomous refs to the ncp will then be due to nc_parent->nc_list,
2862 * so possibly 2 refs left. Taking this into account, if there are no
2863 * additional refs and no children, the ncp will be removed from the topology
2866 * References and/or children may exist if the ncp is in the middle of the
2867 * topology, preventing the ncp from being destroyed.
2869 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
2871 * This function may return a held (but NOT locked) parent node which the
2872 * caller must drop in a loop. Looping is one way to avoid unbounded recursion
2873 * due to deep namecache trees.
2875 * WARNING! For MPSAFE operation this routine must acquire up to three
2876 * spin locks to be able to safely test nc_refs. Lock order is
2879 * hash spinlock if on hash list
2880 * parent spinlock if child of parent
2881 * (the ncp is unresolved so there is no vnode association)
2884 cache_zap(struct namecache *ncp)
2886 struct namecache *par;
2887 struct nchash_head *nchpp;
2889 int nonblock = 1; /* XXX cleanup */
2894 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2895 * This gets rid of any vp->v_namecache list or negative list and
2898 _cache_setunresolved(ncp, 1);
2901 * Try to scrap the entry and possibly tail-recurse on its parent.
2902 * We only scrap unref'd (other than our ref) unresolved entries,
2903 * we do not scrap 'live' entries.
2905 * If nc_parent is non NULL we expect 2 references, else just 1.
2906 * If there are more, someone else also holds the ncp and we cannot
2909 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2910 KKASSERT(ncp->nc_refs > 0);
2913 * If the ncp is linked to its parent it will also be in the hash
2914 * table. We have to be able to lock the parent and the hash table.
2916 * Acquire locks. Note that the parent can't go away while we hold
2917 * a child locked. If nc_parent is present, expect 2 refs instead
2921 if ((par = ncp->nc_parent) != NULL) {
2923 if (_cache_lock_nonblock(par)) {
2925 ncp->nc_flag |= NCF_DEFEREDZAP;
2927 &pcpu_ncache[mycpu->gd_cpuid].numdefered,
2930 _cache_drop(ncp); /* caller's ref */
2938 nchpp = ncp->nc_head;
2939 spin_lock(&nchpp->spin);
2943 * With the parent and nchpp locked, and the vnode removed
2944 * (no vp->v_namecache), we expect 1 or 2 refs. If there are
2945 * more, someone else has a ref and we cannot zap the entry.
2948 * one for our parent link (parent also has one from the linkage)
2956 * On failure undo the work we've done so far and drop the
2957 * caller's ref and ncp.
2959 if (ncp->nc_refs != refcmp || TAILQ_FIRST(&ncp->nc_list)) {
2961 spin_unlock(&nchpp->spin);
2970 * We own all the refs and with the spinlocks held no further
2971 * refs can be acquired by others.
2973 * Remove us from the hash list and parent list. We have to
2974 * drop a ref on the parent's vp if the parent's list becomes
2978 KKASSERT(nchpp == ncp->nc_head);
2979 _cache_unlink_parent(par, ncp, nchpp); /* eats nchpp */
2980 /*_cache_unlock(par);*/
2981 /* &nchpp->spin is unlocked by call */
2983 KKASSERT(ncp->nc_head == NULL);
2987 * ncp should not have picked up any refs. Physically
2990 if (ncp->nc_refs != refcmp) {
2991 panic("cache_zap: %p bad refs %d (expected %d)\n",
2992 ncp, ncp->nc_refs, refcmp);
2994 /* _cache_unlock(ncp) not required */
2995 ncp->nc_refs = -1; /* safety */
2997 kfree(ncp->nc_name, M_VFSCACHEAUX);
2998 kfree_obj(ncp, M_VFSCACHE);
3002 * Loop up if we can recursively clean out the parent.
3005 refcmp = 1; /* ref on parent */
3006 if (par->nc_parent) /* par->par */
3008 par->nc_flag &= ~NCF_DEFEREDZAP;
3009 if ((par->nc_flag & NCF_UNRESOLVED) &&
3010 par->nc_refs == refcmp &&
3011 TAILQ_EMPTY(&par->nc_list))
3023 * Clean up dangling negative cache and deferred-drop entries in the
3026 * This routine is called in the critical path and also called from
3027 * vnlru(). When called from vnlru we use a lower limit to try to
3028 * deal with the negative cache before the critical path has to start
3031 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;
3033 static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
3034 static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
3035 static cache_hs_t exc_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
3038 cache_hysteresis(int critpath)
3050 * Calculate negative ncp limit
3052 neglimit = maxvnodes / ncnegfactor;
3054 neglimit = neglimit * 8 / 10;
3057 * Don't cache too many negative hits. We use hysteresis to reduce
3058 * the impact on the critical path.
3062 switch(neg_cache_hysteresis_state[critpath]) {
3064 if (vfscache_negs > MINNEG && vfscache_negs > neglimit) {
3066 clean_neg = ncnegflush;
3068 clean_neg = ncnegflush +
3069 vfscache_negs - neglimit;
3070 neg_cache_hysteresis_state[critpath] = CHI_HIGH;
3074 if (vfscache_negs > MINNEG * 9 / 10 &&
3075 vfscache_negs * 9 / 10 > neglimit
3078 clean_neg = ncnegflush;
3080 clean_neg = ncnegflush +
3081 vfscache_negs * 9 / 10 -
3084 neg_cache_hysteresis_state[critpath] = CHI_LOW;
3089 _cache_cleanneg(clean_neg);
3092 * Don't cache too many unresolved elements. We use hysteresis to
3093 * reduce the impact on the critical path.
3095 if ((poslimit = ncposlimit) == 0)
3096 poslimit = maxvnodes / ncposfactor;
3098 poslimit = poslimit * 8 / 10;
3101 * Number of unresolved leaf elements in the namecache. These
3102 * can build up for various reasons and may have to be disposed
3103 * of to allow the inactive list to be cleaned out by vnlru_proc()
3107 xnumunres = vfscache_unres;
3110 switch(pos_cache_hysteresis_state[critpath]) {
3112 if (xnumunres > poslimit && xnumunres > MINPOS) {
3114 clean_unres = ncposflush;
3116 clean_unres = ncposflush + xnumunres -
3118 pos_cache_hysteresis_state[critpath] = CHI_HIGH;
3122 if (xnumunres > poslimit * 5 / 6 && xnumunres > MINPOS) {
3124 clean_unres = ncposflush;
3126 clean_unres = ncposflush + xnumunres -
3129 pos_cache_hysteresis_state[critpath] = CHI_LOW;
3135 * Excessive positive hits can accumulate due to large numbers of
3136 * hardlinks (the vnode cache will not prevent ncps representing
3137 * hardlinks from growing without bound).
3139 exclimit = maxvnodes * 2;
3141 exclimit = exclimit * 8 / 10;
3142 xnumleafs = vfscache_leafs;
3145 switch(exc_cache_hysteresis_state[critpath]) {
3147 if (xnumleafs > exclimit && xnumleafs > MINPOS) {
3149 clean_excess = ncposflush;
3151 clean_excess = ncposflush + xnumleafs -
3153 exc_cache_hysteresis_state[critpath] = CHI_HIGH;
3157 if (xnumleafs > exclimit * 5 / 6 && xnumleafs > MINPOS) {
3159 clean_excess = ncposflush;
3161 clean_excess = ncposflush + xnumleafs -
3164 exc_cache_hysteresis_state[critpath] = CHI_LOW;
3169 if (clean_unres || clean_excess)
3170 _cache_cleanpos(clean_unres, clean_excess);
3173 * Clean out dangling deferred-zap ncps which could not be cleanly
3174 * dropped if too many build up. Note that numdefered is
3175 * heuristic. Make sure we are real-time for the current cpu,
3176 * plus the global rollup.
3178 if (pcpu_ncache[mycpu->gd_cpuid].numdefered + numdefered > neglimit) {
3179 _cache_cleandefered();
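/*
 * Worked example (illustrative numbers only, not taken from the source):
 * with maxvnodes = 100000 and ncnegfactor = 16, neglimit is 6250 for the
 * critical path; the vnlru() caller (critpath == 0) scales that by 8/10
 * to 5000, so the background scan starts trimming the negative cache
 * before the critical path is forced to.
 */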
3184 * NEW NAMECACHE LOOKUP API
3186 * Lookup an entry in the namecache. The passed par_nch must be referenced
3187 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
3188 * is ALWAYS returned, even if the supplied component is illegal.
3190 * The resulting namecache entry should be returned to the system with
3191 * cache_put() or cache_unlock() + cache_drop().
3193 * namecache locks are recursive but care must be taken to avoid lock order
3194 * reversals (which is why the passed par_nch must be unlocked). Locking
3195 * rules are ordered for parent traversals, not for child traversals.
3197 * Nobody else will be able to manipulate the associated namespace (e.g.
3198 * create, delete, rename, rename-target) until the caller unlocks the
3201 * The returned entry will be in one of three states: positive hit (non-null
3202 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
3203 * Unresolved entries must be resolved through the filesystem to associate the
3204 * vnode and/or determine whether a positive or negative hit has occurred.
3206 * It is not necessary to lock a directory in order to lock namespace under
3207 * that directory. In fact, it is explicitly not allowed to do that. A
3208 * directory is typically only locked when being created, renamed, or
3211 * The directory (par) may be unresolved, in which case any returned child
3212 * will likely also be marked unresolved. Likely but not guaranteed. Since
3213 * the filesystem lookup requires a resolved directory vnode the caller is
3214 * responsible for resolving the namecache chain top-down. This API
3215 * specifically allows whole chains to be created in an unresolved state.
3218 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
3220 struct nchandle nch;
3221 struct namecache *ncp;
3222 struct namecache *new_ncp;
3223 struct namecache *rep_ncp; /* reuse a destroyed ncp */
3224 struct nchash_head *nchpp;
3232 mp = par_nch->mount;
3236 * This is a good time to call it, no ncp's are locked by
3239 cache_hysteresis(1);
3242 * Try to locate an existing entry
3244 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3245 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3248 nchpp = NCHHASH(hash);
3252 spin_lock(&nchpp->spin);
3254 spin_lock_shared(&nchpp->spin);
3257 * Do a reverse scan to collect any DESTROYED ncps prior to matching
3258 * an existing entry.
3260 TAILQ_FOREACH_REVERSE(ncp, &nchpp->list, nchash_list, nc_hash) {
3262 * Break out if we find a matching entry. Note that
3263 * UNRESOLVED entries may match, but DESTROYED entries
3266 * We may be able to reuse DESTROYED entries that we come
3267 * across, even if the name does not match, as long as
3268 * nc_nlen is correct and the only hold ref is from the nchpp
3271 if (ncp->nc_parent == par_nch->ncp &&
3272 ncp->nc_nlen == nlc->nlc_namelen) {
3273 if (ncp->nc_flag & NCF_DESTROYED) {
3274 if (ncp->nc_refs == 1 && rep_ncp == NULL)
3278 if (bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen))
3286 _cache_hold(rep_ncp);
3289 spin_unlock(&nchpp->spin);
3291 spin_unlock_shared(&nchpp->spin);
3294 _cache_unlock(par_nch->ncp);
3299 * Really try to destroy rep_ncp if encountered.
3300 * Various edge cases can build up more than one,
3301 * so loop if we succeed. This isn't perfect, but
3302 * we can't afford to have tons of entries build
3303 * up on a single nhcpp list due to rename-over
3304 * operations. If that were to happen, the system
3305 * would bog down quickly.
3308 if (_cache_lock_nonblock(rep_ncp) == 0) {
3309 if (rep_ncp->nc_flag & NCF_DESTROYED) {
3310 if (cache_zap(rep_ncp)) {
3315 _cache_unlock(rep_ncp);
3316 _cache_drop(rep_ncp);
3319 _cache_drop(rep_ncp);
3324 * Continue processing the matched entry
3326 if (_cache_lock_special(ncp) == 0) {
3328 * Successfully locked but we must re-test
3329 * conditions that might have changed since
3330 * we did not have the lock before.
3332 if (ncp->nc_parent != par_nch->ncp ||
3333 ncp->nc_nlen != nlc->nlc_namelen ||
3334 bcmp(ncp->nc_name, nlc->nlc_nameptr,
3336 (ncp->nc_flag & NCF_DESTROYED)) {
3340 _cache_auto_unresolve(mp, ncp);
3342 _cache_free(new_ncp);
3343 new_ncp = NULL; /* safety */
3347 _cache_get(ncp); /* cycle the lock to block */
3355 * We failed to locate the entry, so try to resurrect a destroyed
3356 * entry that we did find that is already correctly linked into
3357 * nchpp and the parent. We must re-test conditions after
3358 * successfully locking rep_ncp.
3360 * This case can occur under heavy loads due to not being able
3361 * to safely lock the parent in cache_zap(). Nominally a repeated
3362 * create/unlink load, but only the namelen needs to match.
3364 * An exclusive lock on the nchpp is required to process this case,
3365 * otherwise a race can cause duplicate entries to be created with
3366 * one cpu reusing a DESTROYED ncp while another creates a new_ncp.
3368 if (rep_ncp && use_excl) {
3369 if (_cache_lock_nonblock(rep_ncp) == 0) {
3370 _cache_hold(rep_ncp);
3371 if (rep_ncp->nc_parent == par_nch->ncp &&
3372 rep_ncp->nc_nlen == nlc->nlc_namelen &&
3373 (rep_ncp->nc_flag & NCF_DESTROYED) &&
3374 rep_ncp->nc_refs == 2)
3381 _cache_ncp_gen_enter(ncp);
3383 bcopy(nlc->nlc_nameptr, ncp->nc_name,
3387 * This takes some care. We must clear the
3388 * NCF_DESTROYED flag before unlocking the
3389 * hash chain so other concurrent searches
3390 * do not skip this element.
3392 * We must also unlock the hash chain before
3393 * unresolving the ncp to avoid deadlocks.
3394 * We hold the lock on the ncp so we can safely
3395 * reinitialize nc_flag after that.
3397 ncp->nc_flag &= ~NCF_DESTROYED;
3398 spin_unlock(&nchpp->spin); /* use_excl */
3400 _cache_setunresolved(ncp, 0);
3401 ncp->nc_flag = NCF_UNRESOLVED;
3402 ncp->nc_error = ENOTCONN;
3404 _cache_ncp_gen_exit(ncp);
3407 _cache_unlock(par_nch->ncp);
3411 _cache_free(new_ncp);
3412 new_ncp = NULL; /* safety */
3416 _cache_put(rep_ncp);
3421 * Otherwise create a new entry and add it to the cache. The parent
3422 * ncp must also be locked so we can link into it.
3424 * We have to relookup after possibly blocking in kmalloc or
3425 * when locking par_nch.
3427 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
3428 * mount case, in which case nc_name will be NULL.
3430 * NOTE: In the rep_ncp != NULL case we are trying to reuse
3431 * a DESTROYED entry, but didn't have an exclusive lock.
3432 * In this situation we do not create a new_ncp.
3434 if (new_ncp == NULL) {
3436 spin_unlock(&nchpp->spin);
3438 spin_unlock_shared(&nchpp->spin);
3439 if (rep_ncp == NULL) {
3440 new_ncp = cache_alloc(nlc->nlc_namelen);
3441 if (nlc->nlc_namelen) {
3442 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
3444 new_ncp->nc_name[nlc->nlc_namelen] = 0;
3452 * NOTE! The spinlock is held exclusively here because new_ncp
3455 if (par_locked == 0) {
3456 spin_unlock(&nchpp->spin);
3457 _cache_lock(par_nch->ncp);
3463 * Link to parent (requires another ref, the one already in new_ncp
3464 * is what we will return).
3466 * WARNING! We still hold the spinlock. We have to set the hash
3467 * table entry atomically.
3471 _cache_link_parent(ncp, par_nch->ncp, nchpp);
3472 spin_unlock(&nchpp->spin);
3473 _cache_unlock(par_nch->ncp);
3474 /* par_locked = 0 - not used */
3477 * stats and namecache size management
3479 if (ncp->nc_flag & NCF_UNRESOLVED)
3480 ++gd->gd_nchstats->ncs_miss;
3481 else if (ncp->nc_vp)
3482 ++gd->gd_nchstats->ncs_goodhits;
3484 ++gd->gd_nchstats->ncs_neghits;
3487 _cache_mntref(nch.mount);
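/*
 * Illustrative sketch only (not part of the build): handling the three
 * possible states of a cache_nlookup() result.  The gen handling and the
 * surrounding variables are assumptions for illustration.
 */
#if 0
	struct nchandle nch;
	u_int dummy_gen = 0;
	int error = 0;

	nch = cache_nlookup(&par_nch, &nlc);	/* always non-NULL, locked+ref'd */
	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(&nch, &dummy_gen, cred);
	if (error == 0 && nch.ncp->nc_vp == NULL)
		error = ENOENT;			/* pre-existing negative hit */
	/* ... on a positive hit nch.ncp->nc_vp is valid ... */
	cache_put(&nch);
#endif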
3493 * Attempt to look up a namecache entry and return with a shared namecache
3494 * lock. This operates non-blocking; EWOULDBLOCK is returned if excl is
3495 * set or we are unable to lock.
3498 cache_nlookup_maybe_shared(struct nchandle *par_nch,
3499 struct nlcomponent *nlc,
3500 int excl, struct nchandle *res_nch)
3502 struct namecache *ncp;
3503 struct nchash_head *nchpp;
3509 * If an exclusive lock is requested or shared namecache locks are disabled,
3512 if (ncp_shared_lock_disable || excl)
3513 return(EWOULDBLOCK);
3516 mp = par_nch->mount;
3519 * This is a good time to call it, no ncp's are locked by
3522 cache_hysteresis(1);
3525 * Try to locate an existing entry
3527 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3528 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3529 nchpp = NCHHASH(hash);
3531 spin_lock_shared(&nchpp->spin);
3533 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3535 * Break out if we find a matching entry. Note that
3536 * UNRESOLVED entries may match, but DESTROYED entries
3539 if (ncp->nc_parent == par_nch->ncp &&
3540 ncp->nc_nlen == nlc->nlc_namelen &&
3541 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3542 (ncp->nc_flag & NCF_DESTROYED) == 0
3545 spin_unlock_shared(&nchpp->spin);
3547 if (_cache_lock_shared_special(ncp) == 0) {
3548 if (ncp->nc_parent == par_nch->ncp &&
3549 ncp->nc_nlen == nlc->nlc_namelen &&
3550 bcmp(ncp->nc_name, nlc->nlc_nameptr,
3551 ncp->nc_nlen) == 0 &&
3552 (ncp->nc_flag & NCF_DESTROYED) == 0 &&
3553 (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
3554 _cache_auto_unresolve_test(mp, ncp) == 0)
3561 return(EWOULDBLOCK);
3568 spin_unlock_shared(&nchpp->spin);
3569 return(EWOULDBLOCK);
3574 * Note that nc_error might be non-zero (e.g. ENOENT).
3577 res_nch->mount = mp;
3579 ++gd->gd_nchstats->ncs_goodhits;
3580 _cache_mntref(res_nch->mount);
3582 KKASSERT(ncp->nc_error != EWOULDBLOCK);
3583 return(ncp->nc_error);
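/*
 * Illustrative sketch only (not part of the build): the shared-lock fast
 * path with a fallback to the normal exclusive lookup.  'wantexcl' is an
 * assumed caller flag.
 */
#if 0
	struct nchandle nch;

	if (cache_nlookup_maybe_shared(&par_nch, &nlc, wantexcl, &nch) ==
	    EWOULDBLOCK) {
		nch = cache_nlookup(&par_nch, &nlc); /* blocking, exclusive */
	}
	/* ... */
	cache_put(&nch);
#endif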
3587 * This is a non-blocking version of cache_nlookup() used by
3588 * nfs_readdirplusrpc_uio(). It can fail for any reason and
3589 * will return nch.ncp == NULL in that case.
3592 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
3594 struct nchandle nch;
3595 struct namecache *ncp;
3596 struct namecache *new_ncp;
3597 struct nchash_head *nchpp;
3604 mp = par_nch->mount;
3608 * Try to locate an existing entry
3610 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3611 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3613 nchpp = NCHHASH(hash);
3615 spin_lock(&nchpp->spin);
3616 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3618 * Break out if we find a matching entry. Note that
3619 * UNRESOLVED entries may match, but DESTROYED entries
3622 if (ncp->nc_parent == par_nch->ncp &&
3623 ncp->nc_nlen == nlc->nlc_namelen &&
3624 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3625 (ncp->nc_flag & NCF_DESTROYED) == 0
3628 spin_unlock(&nchpp->spin);
3630 _cache_unlock(par_nch->ncp);
3633 if (_cache_lock_special(ncp) == 0) {
3634 if (ncp->nc_parent != par_nch->ncp ||
3635 ncp->nc_nlen != nlc->nlc_namelen ||
3636 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) ||
3637 (ncp->nc_flag & NCF_DESTROYED)) {
3638 kprintf("cache_lookup_nonblock: "
3639 "ncp-race %p %*.*s\n",
3648 _cache_auto_unresolve(mp, ncp);
3650 _cache_free(new_ncp);
3661 * We failed to locate an entry, create a new entry and add it to
3662 * the cache. The parent ncp must also be locked so we
3665 * We have to relookup after possibly blocking in kmalloc or
3666 * when locking par_nch.
3668 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
3669 * mount case, in which case nc_name will be NULL.
3671 if (new_ncp == NULL) {
3672 spin_unlock(&nchpp->spin);
3673 new_ncp = cache_alloc(nlc->nlc_namelen);
3674 if (nlc->nlc_namelen) {
3675 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
3677 new_ncp->nc_name[nlc->nlc_namelen] = 0;
3681 if (par_locked == 0) {
3682 spin_unlock(&nchpp->spin);
3683 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
3691 * Link to parent (requires another ref, the one already in new_ncp
3692 * is what we will return).
3694 * WARNING! We still hold the spinlock. We have to set the hash
3695 * table entry atomically.
3699 _cache_link_parent(ncp, par_nch->ncp, nchpp);
3700 spin_unlock(&nchpp->spin);
3701 _cache_unlock(par_nch->ncp);
3702 /* par_locked = 0 - not used */
3705 * stats and namecache size management
3707 if (ncp->nc_flag & NCF_UNRESOLVED)
3708 ++gd->gd_nchstats->ncs_miss;
3709 else if (ncp->nc_vp)
3710 ++gd->gd_nchstats->ncs_goodhits;
3712 ++gd->gd_nchstats->ncs_neghits;
3715 _cache_mntref(nch.mount);
3720 _cache_free(new_ncp);
3729 * This is a non-locking optimized lookup that depends on adding a ref
3730 * to prevent normal eviction. nch.ncp can be returned as NULL for any
3731 * reason and the caller will retry with normal locking in that case.
3733 * This function only returns resolved entries so callers do not accidentally
3734 * race doing out of order / unfenced field checks.
3736 * The caller must validate the result for parent-to-child continuity.
3739 cache_nlookup_nonlocked(struct nchandle *par_nch, struct nlcomponent *nlc)
3741 struct nchandle nch;
3742 struct namecache *ncp;
3743 struct nchash_head *nchpp;
3749 mp = par_nch->mount;
3752 * Try to locate an existing entry
3754 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3755 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3756 nchpp = NCHHASH(hash);
3758 spin_lock_shared(&nchpp->spin);
3759 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3761 * Break out if we find a matching entry. Note that
3762 * UNRESOLVED entries may match, but DESTROYED entries
3763 * do not. However, UNRESOLVED entries still return failure.
3765 if (ncp->nc_parent == par_nch->ncp &&
3766 ncp->nc_nlen == nlc->nlc_namelen &&
3767 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3768 (ncp->nc_flag & NCF_DESTROYED) == 0
3771 * Test NFS timeout for auto-unresolve. Give up if
3772 * the entry is not resolved.
3774 * Getting the ref with the nchpp locked prevents
3775 * any transition to NCF_DESTROYED.
3777 if (_cache_auto_unresolve_test(par_nch->mount, ncp))
3779 if (ncp->nc_flag & NCF_UNRESOLVED)
3782 spin_unlock_shared(&nchpp->spin);
3785 * We need an additional test to ensure that the ref
3786 * we got above prevents transitions to NCF_UNRESOLVED.
3787 * This can occur if another thread is currently
3788 * holding the ncp exclusively locked or (if we raced
3789 * that and it unlocked before our test) the flag
3792 * XXX check if superseded by nc_generation XXX
3794 if (_cache_lockstatus(ncp) < 0 ||
3795 (ncp->nc_flag & (NCF_DESTROYED | NCF_UNRESOLVED)))
3797 if ((ncvp_debug & 4) &&
3799 (NCF_DESTROYED | NCF_UNRESOLVED)))
3801 kprintf("ncp state change: %p %08x %d %s\n",
3802 ncp, ncp->nc_flag, ncp->nc_error,
3806 spin_lock_shared(&nchpp->spin);
3811 * Return the ncp bundled into a nch on success.
3812 * The ref should passively prevent the ncp from
3813 * becoming unresolved without having to hold a lock.
3814 * (XXX this may not be entirely true)
3819 spin_unlock_shared(&nchpp->spin);
3826 * stats and namecache size management
3828 if (ncp->nc_flag & NCF_UNRESOLVED)
3829 ++gd->gd_nchstats->ncs_miss;
3830 else if (ncp->nc_vp)
3831 ++gd->gd_nchstats->ncs_goodhits;
3833 ++gd->gd_nchstats->ncs_neghits;
3836 _cache_mntref(nch.mount);
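/*
 * Illustrative sketch only (not part of the build): the lockless fast
 * path with a retry under normal locking when no usable entry comes back.
 * Note the differing release functions for the two paths.
 */
#if 0
	struct nchandle nch;

	nch = cache_nlookup_nonlocked(&par_nch, &nlc);	/* ref'd, NOT locked */
	if (nch.ncp == NULL) {
		nch = cache_nlookup(&par_nch, &nlc);	/* ref'd + locked */
		/* ... locked-path handling ... */
		cache_put(&nch);
	} else {
		/* ... validate parent-to-child continuity ... */
		cache_drop(&nch);
	}
#endif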
3842 * The namecache entry is marked as being used as a mount point.
3843 * Locate the mount if it is visible to the caller. The DragonFly
3844 * mount system allows arbitrary loops in the topology and disentangles
3845 * those loops by matching against (mp, ncp) rather than just (ncp).
3846 * This means any given ncp can dive any number of mounts, depending
3847 * on the relative mount (e.g. nullfs) the caller is at in the topology.
3849 * We use a very simple frontend cache to reduce SMP conflicts,
3850 * which we have to do because the mountlist scan needs an exclusive
3851 * lock around its ripout info list. Not to mention that there might
3852 * be a lot of mounts.
3854 * Because all mounts can potentially be accessed by all cpus, break the cpu's
3855 * down a bit to allow some contention rather than making the cache
3858 * The hash table is split into per-cpu areas and is 4-way set-associative.
3860 struct findmount_info {
3861 struct mount *result;
3862 struct mount *nch_mount;
3863 struct namecache *nch_ncp;
3867 struct ncmount_cache *
3868 ncmount_cache_lookup4(struct mount *mp, struct namecache *ncp)
3872 hash = iscsi_crc32(&mp, sizeof(mp));
3873 hash = iscsi_crc32_ext(&ncp, sizeof(ncp), hash);
3875 hash = hash & ((NCMOUNT_NUMCACHE - 1) & ~(NCMOUNT_SET - 1));
3877 return (&ncmount_cache[hash]);
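/*
 * Worked example (illustrative only): with NCMOUNT_SET == 4 and an assumed
 * NCMOUNT_NUMCACHE of 1024, the mask above is 1023 & ~3 = 0x3fc, so the
 * computed hash always lands on the base index of an aligned 4-entry set;
 * ncmount_cache_lookup() then probes the remaining entries of that set.
 */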
3881 struct ncmount_cache *
3882 ncmount_cache_lookup(struct mount *mp, struct namecache *ncp)
3884 struct ncmount_cache *ncc;
3885 struct ncmount_cache *best;
3890 ncc = ncmount_cache_lookup4(mp, ncp);
3893 * NOTE: When checking for a ticks overflow implement a slop of
3894 * 2 ticks just to be safe, because ticks is accessed
3895 * non-atomically; one CPU can increment it while another
3896 * is still using the old value.
3898 if (ncc->ncp == ncp && ncc->mp == mp) /* 0 */
3900 delta = (int)(ticks - ncc->ticks); /* beware GCC opts */
3901 if (delta < -2) /* overflow reset */
3906 for (i = 1; i < NCMOUNT_SET; ++i) { /* 1, 2, 3 */
3908 if (ncc->ncp == ncp && ncc->mp == mp)
3910 delta = (int)(ticks - ncc->ticks);
3913 if (delta > best_delta) {
3922 * pcpu-optimized mount search. Locate the recursive mountpoint, avoid
3923 * doing an expensive mountlist_scan*() if possible.
3925 * (mp, ncp) -> mountonpt.k
3927 * Returns a referenced mount pointer or NULL
3929 * General SMP operation uses a per-cpu umount_spin to interlock unmount
3930 * operations (that is, where the mp_target can be freed out from under us).
3932 * Lookups use the ncc->updating counter to validate the contents in order
3933 * to avoid having to obtain the per cache-element spin-lock. In addition,
3934 * the ticks field is only updated when it changes. However, if our per-cpu
3935 * lock fails due to an unmount-in-progress, we fall back to the
3936 * cache-element's spin-lock.
3939 cache_findmount(struct nchandle *nch)
3941 struct findmount_info info;
3942 struct ncmount_cache *ncc;
3943 struct ncmount_cache ncc_copy;
3944 struct mount *target;
3945 struct pcpu_ncache *pcpu;
3946 struct spinlock *spinlk;
3950 if (ncmount_cache_enable == 0 || pcpu == NULL) {
3954 pcpu += mycpu->gd_cpuid;
3957 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3958 if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3961 * This is a bit messy for now because we do not yet have
3962 * safe disposal of mount structures. We have to ref
3963 * ncc->mp_target but the 'update' counter only tells us
3964 * whether the cache has changed after the fact.
3966 * For now get a per-cpu spinlock that will only contend
3967 * against umount's. This is the best path. If it fails,
3968 * instead of waiting on the umount we fall back to a
3969 * shared ncc->spin lock, which will generally only cost a
3972 update = ncc->updating;
3973 if (__predict_true(spin_trylock(&pcpu->umount_spin))) {
3974 spinlk = &pcpu->umount_spin;
3976 spinlk = &ncc->spin;
3977 spin_lock_shared(spinlk);
3979 if (update & 1) { /* update in progress */
3980 spin_unlock_any(spinlk);
3985 if (ncc->updating != update) { /* content changed */
3986 spin_unlock_any(spinlk);
3989 if (ncc_copy.ncp != nch->ncp || ncc_copy.mp != nch->mount) {
3990 spin_unlock_any(spinlk);
3993 if (ncc_copy.isneg == 0) {
3994 target = ncc_copy.mp_target;
3995 if (target->mnt_ncmounton.mount == nch->mount &&
3996 target->mnt_ncmounton.ncp == nch->ncp) {
3998 * Cache hit (positive) (avoid dirtying
3999 * the cache line if possible)
4001 if (ncc->ticks != (int)ticks)
4002 ncc->ticks = (int)ticks;
4003 _cache_mntref(target);
4007 * Cache hit (negative) (avoid dirtying
4008 * the cache line if possible)
4010 if (ncc->ticks != (int)ticks)
4011 ncc->ticks = (int)ticks;
4014 spin_unlock_any(spinlk);
4024 info.nch_mount = nch->mount;
4025 info.nch_ncp = nch->ncp;
4026 mountlist_scan(cache_findmount_callback, &info,
4027 MNTSCAN_FORWARD | MNTSCAN_NOBUSY | MNTSCAN_NOUNLOCK);
4030 * To reduce multi-re-entry on the cache, relookup in the cache.
4031 * This can still race, obviously, but that's ok.
4033 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
4034 if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
4036 atomic_add_int(&info.result->mnt_refs, -1);
4043 if ((info.result == NULL ||
4044 (info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0)) {
4045 spin_lock(&ncc->spin);
4046 atomic_add_int_nonlocked(&ncc->updating, 1);
4048 KKASSERT(ncc->updating & 1);
4049 if (ncc->mp != nch->mount) {
4051 atomic_add_int(&ncc->mp->mnt_refs, -1);
4052 atomic_add_int(&nch->mount->mnt_refs, 1);
4053 ncc->mp = nch->mount;
4055 ncc->ncp = nch->ncp; /* ptr compares only, not refd*/
4056 ncc->ticks = (int)ticks;
4060 if (ncc->mp_target != info.result) {
4062 atomic_add_int(&ncc->mp_target->mnt_refs, -1);
4063 ncc->mp_target = info.result;
4064 atomic_add_int(&info.result->mnt_refs, 1);
4068 if (ncc->mp_target) {
4069 atomic_add_int(&ncc->mp_target->mnt_refs, -1);
4070 ncc->mp_target = NULL;
4074 atomic_add_int_nonlocked(&ncc->updating, 1);
4075 spin_unlock(&ncc->spin);
4077 return(info.result);
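/*
 * Illustrative sketch only (not part of the build): diving a mount point
 * found on a namecache entry and releasing the ref afterwards.
 */
#if 0
	struct mount *mp_target;

	if ((mp_target = cache_findmount(&nch)) != NULL) {
		/* ... cross into mp_target's root ... */
		cache_dropmount(mp_target);  /* release cache_findmount() ref */
	}
#endif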
4082 cache_findmount_callback(struct mount *mp, void *data)
4084 struct findmount_info *info = data;
4087 * Check the mount's mounted-on point against the passed nch.
4089 if (mp->mnt_ncmounton.mount == info->nch_mount &&
4090 mp->mnt_ncmounton.ncp == info->nch_ncp
4100 cache_dropmount(struct mount *mp)
4106 * mp is being mounted, scrap entries matching mp->mnt_ncmounton (positive
4109 * A full scan is not required, but for now just do it anyway.
4112 cache_ismounting(struct mount *mp)
4114 struct ncmount_cache *ncc;
4115 struct mount *ncc_mp;
4118 if (pcpu_ncache == NULL)
4121 for (i = 0; i < NCMOUNT_NUMCACHE; ++i) {
4122 ncc = &ncmount_cache[i];
4123 if (ncc->mp != mp->mnt_ncmounton.mount ||
4124 ncc->ncp != mp->mnt_ncmounton.ncp) {
4127 spin_lock(&ncc->spin);
4128 atomic_add_int_nonlocked(&ncc->updating, 1);
4130 KKASSERT(ncc->updating & 1);
4131 if (ncc->mp != mp->mnt_ncmounton.mount ||
4132 ncc->ncp != mp->mnt_ncmounton.ncp) {
4135 spin_unlock(&ncc->spin);
4142 atomic_add_int(&ncc_mp->mnt_refs, -1);
4143 ncc_mp = ncc->mp_target;
4144 ncc->mp_target = NULL;
4146 atomic_add_int(&ncc_mp->mnt_refs, -1);
4147 ncc->ticks = (int)ticks - hz * 120;
4150 atomic_add_int_nonlocked(&ncc->updating, 1);
4151 spin_unlock(&ncc->spin);
4155 * Pre-cache the mount point
4157 ncc = ncmount_cache_lookup(mp->mnt_ncmounton.mount,
4158 mp->mnt_ncmounton.ncp);
4160 spin_lock(&ncc->spin);
4161 atomic_add_int_nonlocked(&ncc->updating, 1);
4163 KKASSERT(ncc->updating & 1);
4166 atomic_add_int(&ncc->mp->mnt_refs, -1);
4167 atomic_add_int(&mp->mnt_ncmounton.mount->mnt_refs, 1);
4168 ncc->mp = mp->mnt_ncmounton.mount;
4169 ncc->ncp = mp->mnt_ncmounton.ncp; /* ptr compares only */
4170 ncc->ticks = (int)ticks;
4173 if (ncc->mp_target != mp) {
4175 atomic_add_int(&ncc->mp_target->mnt_refs, -1);
4176 ncc->mp_target = mp;
4177 atomic_add_int(&mp->mnt_refs, 1);
4180 atomic_add_int_nonlocked(&ncc->updating, 1);
4181 spin_unlock(&ncc->spin);
4185 * Scrap any ncmount_cache entries related to mp. Not only do we need to
4186 * scrap entries matching mp->mnt_ncmounton, but we also need to scrap any
4187 * negative hits involving (mp, <any>).
4189 * A full scan is required.
4192 cache_unmounting(struct mount *mp)
4194 struct ncmount_cache *ncc;
4195 struct pcpu_ncache *pcpu;
4196 struct mount *ncc_mp;
4203 for (i = 0; i < ncpus; ++i)
4204 spin_lock(&pcpu[i].umount_spin);
4206 for (i = 0; i < NCMOUNT_NUMCACHE; ++i) {
4207 ncc = &ncmount_cache[i];
4208 if (ncc->mp != mp && ncc->mp_target != mp)
4210 spin_lock(&ncc->spin);
4211 atomic_add_int_nonlocked(&ncc->updating, 1);
4214 if (ncc->mp != mp && ncc->mp_target != mp) {
4215 atomic_add_int_nonlocked(&ncc->updating, 1);
4217 spin_unlock(&ncc->spin);
4224 atomic_add_int(&ncc_mp->mnt_refs, -1);
4225 ncc_mp = ncc->mp_target;
4226 ncc->mp_target = NULL;
4228 atomic_add_int(&ncc_mp->mnt_refs, -1);
4229 ncc->ticks = (int)ticks - hz * 120;
4232 atomic_add_int_nonlocked(&ncc->updating, 1);
4233 spin_unlock(&ncc->spin);
4236 for (i = 0; i < ncpus; ++i)
4237 spin_unlock(&pcpu[i].umount_spin);
4241 * Resolve an unresolved namecache entry, generally by looking it up.
4242 * The passed ncp must be locked and refd.
4244 * Theoretically since a vnode cannot be recycled while held, and since
4245 * the nc_parent chain holds its vnode as long as children exist, the
4246 * direct parent of the cache entry we are trying to resolve should
4247 * have a valid vnode. If not then generate an error that we can
4248 * determine is related to a resolver bug.
4250 * However, if a vnode was in the middle of a recyclement when the NCP
4251 * got locked, ncp->nc_vp might point to a vnode that is about to become
4252 * invalid. cache_resolve() handles this case by unresolving the entry
4253 * and then re-resolving it.
4255 * Note that successful resolution does not necessarily return an error
4256 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
4259 * (*genp) is adjusted based on our resolution operation. If it is already
4260 * wrong, that's ok... it will still be wrong on return.
4263 cache_resolve(struct nchandle *nch, u_int *genp, struct ucred *cred)
4265 struct namecache *par_tmp;
4266 struct namecache *par;
4267 struct namecache *ncp;
4268 struct nchandle nctmp;
4275 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
4279 * If the ncp is already resolved we have nothing to do. However,
4280 * we do want to guarantee that a usable vnode is returned when
4281 * a vnode is present, so make sure it hasn't been reclaimed.
4283 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4284 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
4285 _cache_ncp_gen_enter(ncp);
4286 _cache_setunresolved(ncp, 0);
4287 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4288 _cache_ncp_gen_exit(ncp);
4290 return (ncp->nc_error);
4292 } else if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4293 return (ncp->nc_error);
4295 _cache_ncp_gen_enter(ncp);
4298 _cache_ncp_gen_enter(ncp);
4300 /* in gen_enter state */
4304 * If the ncp was destroyed it will never resolve again. This
4305 * can basically only happen when someone is chdir'd into an
4306 * empty directory which is then rmdir'd. We want to catch this
4307 * here and not dive the VFS because the VFS might actually
4308 * have a way to re-resolve the disconnected ncp, which will
4309 * result in inconsistencies in the cdir/nch for proc->p_fd.
4311 if (ncp->nc_flag & NCF_DESTROYED) {
4312 _cache_ncp_gen_exit(ncp);
4317 * Mount points need special handling because the parent does not
4318 * belong to the same filesystem as the ncp.
4320 if (ncp == mp->mnt_ncmountpt.ncp) {
4321 error = cache_resolve_mp(mp, 0);
4322 _cache_ncp_gen_exit(ncp);
4327 * We expect an unbroken chain of ncps to at least the mount point,
4328 * and even all the way to root (but this code doesn't have to go
4329 * past the mount point).
4331 if (ncp->nc_parent == NULL) {
4332 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
4333 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
4334 ncp->nc_error = EXDEV;
4335 _cache_ncp_gen_exit(ncp);
4336 return(ncp->nc_error);
4340 * The vp's of the parent directories in the chain are held via vhold()
4341 * due to the existence of the child, and should not disappear.
4342 * However, there are cases where they can disappear:
4344 * - due to filesystem I/O errors.
4345 * - due to NFS being stupid about tracking the namespace and
4346 * destroying the namespace for entire directories quite often.
4347 * - due to forced unmounts.
4348 * - due to an rmdir (parent will be marked DESTROYED)
4350 * When this occurs we have to track the chain backwards and resolve
4351 * it, looping until the resolver catches up to the current node. We
4352 * could recurse here but we might run ourselves out of kernel stack
4353 * so we do it in a more painful manner. This situation really should
4354 * not occur all that often, and if it does, it should not have to go back too
4355 * many nodes to resolve the ncp.
4357 while ((dvp = cache_dvpref(ncp)) == NULL) {
4359 * This case can occur if a process is CD'd into a
4360 * directory which is then rmdir'd. If the parent is marked
4361 * destroyed there is no point trying to resolve it.
4363 if (ncp->nc_parent->nc_flag & NCF_DESTROYED) {
4364 if (ncvp_debug & 8) {
4365 kprintf("nc_parent destroyed: %s/%s\n",
4366 ncp->nc_parent->nc_name, ncp->nc_name);
4368 _cache_ncp_gen_exit(ncp);
4371 par = ncp->nc_parent;
4374 while ((par_tmp = par->nc_parent) != NULL &&
4375 par_tmp->nc_vp == NULL) {
4376 _cache_hold(par_tmp);
4377 _cache_lock(par_tmp);
4381 if (par->nc_parent == NULL) {
4382 kprintf("EXDEV case 2 %*.*s\n",
4383 par->nc_nlen, par->nc_nlen, par->nc_name);
4385 _cache_ncp_gen_exit(ncp);
4389 * The parent is not set in stone; ref and lock it to prevent
4390 * it from disappearing. Also note that due to renames it
4391 * is possible for our ncp to move and for par to no longer
4392 * be one of its parents. We resolve it anyway, the loop
4393 * will handle any moves.
4395 _cache_get(par); /* additional hold/lock */
4396 _cache_put(par); /* from earlier hold/lock */
4397 if (par == nch->mount->mnt_ncmountpt.ncp) {
4398 cache_resolve_mp(nch->mount, 0);
4399 } else if ((dvp = cache_dvpref(par)) == NULL) {
4400 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
4401 par->nc_nlen, par->nc_nlen, par->nc_name);
4405 if (par->nc_flag & NCF_UNRESOLVED) {
4408 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
4412 if ((error = par->nc_error) != 0) {
4413 if (par->nc_error != EAGAIN) {
4414 kprintf("EXDEV case 3 %*.*s error %d\n",
4415 par->nc_nlen, par->nc_nlen, par->nc_name,
4418 _cache_ncp_gen_exit(ncp);
4421 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
4422 par, par->nc_nlen, par->nc_nlen, par->nc_name);
4429 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
4430 * ncp's and reattach them. If this occurs the original ncp is marked
4431 * EAGAIN to force a relookup.
4433 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
4434 * ncp must already be resolved.
4439 *genp += 4; /* setvp bumps the generation */
4440 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
4443 ncp->nc_error = EPERM;
4446 if (ncp->nc_error == EAGAIN) {
4447 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
4448 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
4451 _cache_ncp_gen_exit(ncp);
4453 return(ncp->nc_error);
4457 * Resolve the ncp associated with a mount point. Such ncp's almost always
4458 * remain resolved and this routine is rarely called. NFS MPs tend to force
4459 * re-resolution more often due to NFS's mac-truck-smash-the-namecache
4460 * method of tracking namespace changes.
4462 * The semantics for this call is that the passed ncp must be locked on
4463 * entry and will be locked on return. However, if we actually have to
4464 * resolve the mount point we temporarily unlock the entry in order to
4465 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
4466 * the unlock we have to recheck the flags after we relock.
4469 cache_resolve_mp(struct mount *mp, int adjgen)
4471 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
4475 KKASSERT(mp != NULL);
4478 * If the ncp is already resolved we have nothing to do. However,
4479 * we do want to guarantee that a usable vnode is returned when
4480 * a vnode is present, so make sure it hasn't been reclaimed.
4482 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4483 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
4484 _cache_setunresolved(ncp, adjgen);
4487 if (ncp->nc_flag & NCF_UNRESOLVED) {
4489 * ncp must be unlocked across the vfs_busy(), but
4490 * once busied lock ordering is ncp(s), then vnodes,
4491 * so we must relock the ncp before issuing the VFS_ROOT().
4494 while (vfs_busy(mp, 0))
4497 error = VFS_ROOT(mp, &vp);
4500 * recheck the ncp state after relocking.
4502 if (ncp->nc_flag & NCF_UNRESOLVED) {
4503 ncp->nc_error = error;
4505 _cache_setvp(mp, ncp, vp, adjgen);
4508 kprintf("[diagnostic] cache_resolve_mp: failed"
4509 " to resolve mount %p err=%d ncp=%p\n",
4511 _cache_setvp(mp, ncp, NULL, adjgen);
4513 } else if (error == 0) {
4518 return(ncp->nc_error);
4522 * Resolve the parent vnode
4525 cache_resolve_dvp(struct nchandle *nch, struct ucred *cred, struct vnode **dvpp)
4527 struct namecache *par_tmp;
4528 struct namecache *par;
4529 struct namecache *ncp;
4530 struct nchandle nctmp;
4538 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
4541 * Treat this as a mount point even if it has a parent (e.g.
4542 * null-mount). Return a NULL dvp and no error.
4544 if (ncp == mp->mnt_ncmountpt.ncp)
4548 * If the ncp was destroyed there is no parent directory, return
4551 if (ncp->nc_flag & NCF_DESTROYED)
4555 * No parent if at the root of a filesystem, no error. Typically
4556 * not applicable to null-mounts. This case should have been caught
4557 * in the above ncmountpt check.
4559 if (ncp->nc_parent == NULL)
4563 * Resolve the parent dvp.
4565 * The vp's of the parent directories in the chain are held via vhold()
4566 * due to the existence of the child, and should not disappear.
4567 * However, there are cases where they can disappear:
4569 * - due to filesystem I/O errors.
4570 * - due to NFS being stupid about tracking the namespace and
4571 * destroying the namespace for entire directories quite often.
4572 * - due to forced unmounts.
4573 * - due to an rmdir (parent will be marked DESTROYED)
4575 * When this occurs we have to track the chain backwards and resolve
4576 * it, looping until the resolver catches up to the current node. We
4577 * could recurse here but we might run ourselves out of kernel stack
4578 * so we do it in a more painful manner. This situation really should
4579 * not occur all that often, and if it does, it should not have to go back too
4580 * many nodes to resolve the ncp.
4582 while ((dvp = cache_dvpref(ncp)) == NULL) {
4584 * This case can occur if a process is CD'd into a
4585 * directory which is then rmdir'd. If the parent is marked
4586 * destroyed there is no point trying to resolve it.
4588 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
4590 par = ncp->nc_parent;
4593 while ((par_tmp = par->nc_parent) != NULL &&
4594 par_tmp->nc_vp == NULL) {
4595 _cache_hold(par_tmp);
4596 _cache_lock(par_tmp);
4600 if (par->nc_parent == NULL) {
4601 kprintf("EXDEV case 2 %*.*s\n",
4602 par->nc_nlen, par->nc_nlen, par->nc_name);
4608 * The parent is not set in stone; ref and lock it to prevent
4609 * it from disappearing. Also note that due to renames it
4610 * is possible for our ncp to move and for par to no longer
4611 * be one of its parents. We resolve it anyway, the loop
4612 * will handle any moves.
4614 _cache_get(par); /* additional hold/lock */
4615 _cache_put(par); /* from earlier hold/lock */
4616 if (par == nch->mount->mnt_ncmountpt.ncp) {
4617 cache_resolve_mp(nch->mount, 1);
4618 } else if ((dvp = cache_dvpref(par)) == NULL) {
4619 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
4620 par->nc_nlen, par->nc_nlen, par->nc_name);
4624 if (par->nc_flag & NCF_UNRESOLVED) {
4627 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
4631 if ((error = par->nc_error) != 0) {
4632 if (par->nc_error != EAGAIN) {
4633 kprintf("EXDEV case 3 %*.*s error %d\n",
4634 par->nc_nlen, par->nc_nlen, par->nc_name,
4639 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
4640 par, par->nc_nlen, par->nc_nlen, par->nc_name);
4647 * We have a referenced dvp
4654 * Clean out negative cache entries when too many have accumulated.
4657 _cache_cleanneg(long count)
4659 struct pcpu_ncache *pn;
4660 struct namecache *ncp;
4661 static uint32_t neg_rover;
4665 n = neg_rover++; /* SMP heuristic, race ok */
4667 n = n % (uint32_t)ncpus;
4670 * Normalize vfscache_negs and count. count is sometimes based
4671 * on vfscache_negs. vfscache_negs is heuristic and can sometimes
4672 * have crazy values.
4674 vnegs = vfscache_negs;
4676 if (vnegs <= MINNEG)
4681 pn = &pcpu_ncache[n];
4682 spin_lock(&pn->neg_spin);
4683 count = pn->neg_count * count / vnegs + 1;
4684 spin_unlock(&pn->neg_spin);
4687 * Attempt to clean out the specified number of negative cache
4691 spin_lock(&pn->neg_spin);
4692 ncp = TAILQ_FIRST(&pn->neg_list);
4694 spin_unlock(&pn->neg_spin);
4697 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
4698 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
4700 spin_unlock(&pn->neg_spin);
4703 * This can race, so we must re-check that the ncp
4704 * is on the ncneg.list after successfully locking it.
4706 * Don't scrap actively referenced ncps. There should be
4707 * 3 refs: the natural ref, one from being on the neg list,
4710 * Recheck fields after successfully locking to ensure
4711 * that it is in fact still on the negative list with no
4714 * WARNING! On the ncneglist scan any race against other
4715 * destructors (zaps or cache_inval_vp_quick() calls)
4716 * will have already unresolved the ncp and cause
4717 * us to drop instead of zap. This is fine; if
4718 * our drop winds up being the last one it will
4721 if (_cache_lock_special(ncp) == 0) {
4722 if (ncp->nc_vp == NULL &&
4723 ncp->nc_refs == 3 &&
4724 (ncp->nc_flag & NCF_UNRESOLVED) == 0)
4726 ++pcpu_ncache[mycpu->gd_cpuid].clean_neg_count;
4740 * Clean out unresolved cache entries when too many have accumulated.
4741 * Resolved cache entries are cleaned out via the vnode reclamation
4742 * mechanism and by _cache_cleanneg().
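 *
 * As used below, ucount bounds how many unresolved entries may be
 * destroyed and xcount bounds how many other entries may be destroyed;
 * the scan stops once both are exhausted or the cpu budget (count)
 * runs out.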
4745 _cache_cleanpos(long ucount, long xcount)
4747 static volatile int rover;
4748 struct nchash_head *nchpp;
4749 struct namecache *ncp;
4754 * Don't burn too much cpu looking for stuff
4756 count = (ucount > xcount) ? ucount : xcount;
4760 * Attempt to clean out the specified number of cache entries.
4762 while (count > 0 && (ucount > 0 || xcount > 0)) {
4763 rover_copy = ++rover; /* MPSAFEENOUGH */
4765 nchpp = NCHHASH(rover_copy);
4767 if (TAILQ_FIRST(&nchpp->list) == NULL) {
4775 spin_lock(&nchpp->spin);
4776 ncp = TAILQ_FIRST(&nchpp->list);
4779 * Skip placeholder ncp's. Do not shift their
4780 * position in the list.
4782 while (ncp && (ncp->nc_flag & NCF_DUMMY))
4783 ncp = TAILQ_NEXT(ncp, nc_hash);
4787 * Move to end of list
4789 TAILQ_REMOVE(&nchpp->list, ncp, nc_hash);
4790 TAILQ_INSERT_TAIL(&nchpp->list, ncp, nc_hash);
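/*
 * Rotating the examined entry to the tail of its hash chain means
 * successive calls keep advancing through the chain instead of
 * re-examining the same head entry, so the whole bucket is eventually
 * covered.
 */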
4792 if (ncp->nc_refs != ncpbaserefs(ncp)) {
4794 * Do not destroy internal nodes that have
4795 * children or nodes which have thread references.
4799 } else if (ucount > 0 &&
4800 (ncp->nc_flag & NCF_UNRESOLVED))
4803 * Destroy unresolved nodes if asked.
4808 } else if (xcount > 0) {
4810 * Destroy any other node if asked.
4821 spin_unlock(&nchpp->spin);
4824 * Try to scrap the ncp if we can do so non-blocking.
4825 * We must re-check nc_refs after locking, and it will
4826 * have one additional ref from above.
4829 if (_cache_lock_special(ncp) == 0) {
4830 if (ncp->nc_refs == 1 + ncpbaserefs(ncp)) {
4831 ++pcpu_ncache[mycpu->gd_cpuid].
4847 * This is a kitchen sink function to clean out ncps which we
4848 * tried to zap from cache_drop() but failed because we were
4849 * unable to acquire the parent lock.
4851 * Such entries can also be removed via cache_inval_vp(), such
4852 * as when unmounting.
4855 _cache_cleandefered(void)
4857 struct nchash_head *nchpp;
4858 struct namecache *ncp;
4859 struct namecache dummy;
4863 * Create a list iterator. DUMMY indicates that this is a list
4864 * iterator, DESTROYED prevents matches by lookup functions.
4867 pcpu_ncache[mycpu->gd_cpuid].numdefered = 0;
4868 bzero(&dummy, sizeof(dummy));
4869 dummy.nc_flag = NCF_DESTROYED | NCF_DUMMY;
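/*
 * In effect the dummy entry is a movable cursor: it is re-inserted just
 * after the ncp being examined so the bucket spinlock can be dropped
 * while that ncp is locked, and the scan then resumes from the dummy's
 * position without restarting the chain.
 */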
4872 for (i = 0; i <= nchash; ++i) {
4873 nchpp = &nchashtbl[i];
4875 spin_lock(&nchpp->spin);
4876 TAILQ_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
ncp = &dummy;
4878 while ((ncp = TAILQ_NEXT(ncp, nc_hash)) != NULL) {
4879 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
4881 TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
4882 TAILQ_INSERT_AFTER(&nchpp->list, ncp, &dummy, nc_hash);
4884 spin_unlock(&nchpp->spin);
4885 if (_cache_lock_nonblock(ncp) == 0) {
4886 ncp->nc_flag &= ~NCF_DEFEREDZAP;
4890 spin_lock(&nchpp->spin);
4893 TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
4894 spin_unlock(&nchpp->spin);
4899 * Name cache initialization, from vfsinit() when we are booting
4904 struct pcpu_ncache *pn;
4909 * Per-cpu accounting and negative hit list
4911 pcpu_ncache = kmalloc(sizeof(*pcpu_ncache) * ncpus,
4912 M_VFSCACHEAUX, M_WAITOK|M_ZERO);
4913 for (i = 0; i < ncpus; ++i) {
4914 pn = &pcpu_ncache[i];
4915 TAILQ_INIT(&pn->neg_list);
4916 spin_init(&pn->neg_spin, "ncneg");
4917 spin_init(&pn->umount_spin, "ncumm");
4921 * Initialize per-cpu namecache effectiveness statistics.
4923 for (i = 0; i < ncpus; ++i) {
4924 gd = globaldata_find(i);
4925 gd->gd_nchstats = &nchstats[i];
4929 * Create a generous namecache hash table
4931 nchashtbl = hashinit_ext(vfs_inodehashsize(),
4932 sizeof(struct nchash_head),
4933 M_VFSCACHEAUX, &nchash);
4934 for (i = 0; i <= (int)nchash; ++i) {
4935 TAILQ_INIT(&nchashtbl[i].list);
4936 spin_init(&nchashtbl[i].spin, "nchinit_hash");
4938 for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
4939 spin_init(&ncmount_cache[i].spin, "nchinit_cache");
4940 nclockwarn = 5 * hz;
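/*
 * nchash, as set up by hashinit_ext(), appears to be used as an
 * inclusive mask (note the "<=" bounds in the initialization loop above
 * and in the scans elsewhere in this file), and each bucket carries its
 * own spinlock so hash chains can be manipulated without a global lock.
 */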
4944 * Called from start_init() to bootstrap the root filesystem. Returns
4945 * a referenced, unlocked namecache record to serve as a root or the
4946 * root of the system.
4948 * Adjust our namecache counts
4951 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
4953 /*struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];*/
4955 /* nc_parent is NULL, doesn't count as a leaf or unresolved */
4956 /*atomic_add_long(&pn->vfscache_leafs, 1);*/
4957 /*atomic_add_long(&pn->vfscache_unres, 1);*/
4959 nch->ncp = cache_alloc(0);
4963 _cache_setvp(nch->mount, nch->ncp, vp, 1);
4967 * vfs_cache_setroot()
4969 * Create an association between the root of our namecache and
4970 * the root vnode. This routine may be called several times during booting.
4973 * If the caller intends to save the returned namecache pointer somewhere
4974 * it must cache_hold() it.
4977 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
4980 struct nchandle onch;
4988 cache_zero(&rootnch);
4996 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
4997 * topology and is being removed as quickly as possible. The new VOP_N*()
4998 * API calls are required to make specific adjustments using the supplied
4999 * ncp pointers rather than just bogusly purging random vnodes.
5001 * Invalidate all namecache entries to a particular vnode as well as
5002 * any direct children of that vnode in the namecache. This is a
5003 * 'catch all' purge used by filesystems that do not know any better.
5005 * Note that the linkage between the vnode and its namecache entries will
5006 * be removed, but the namecache entries themselves might stay put due to
5007 * active references from elsewhere in the system or due to the existence of
5008 * the children. The namecache topology is left intact even if we do not
5009 * know what the vnode association is. Such entries will be marked NCF_UNRESOLVED.
5013 cache_purge(struct vnode *vp)
5015 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
5018 __read_mostly static int disablecwd;
5019 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
5026 sys___getcwd(struct sysmsg *sysmsg, const struct __getcwd_args *uap)
5036 buflen = uap->buflen;
5039 if (buflen > MAXPATHLEN)
5040 buflen = MAXPATHLEN;
5042 buf = kmalloc(buflen, M_TEMP, M_WAITOK);
5043 bp = kern_getcwd(buf, buflen, &error);
5045 error = copyout(bp, uap->buf, strlen(bp) + 1);
5051 kern_getcwd(char *buf, size_t buflen, int *error)
5053 struct proc *p = curproc;
5055 int i, slash_prefixed;
5056 struct filedesc *fdp;
5057 struct nchandle nch;
5058 struct namecache *ncp;
5066 nch = fdp->fd_ncdir;
5071 while (ncp && (ncp != fdp->fd_nrdir.ncp ||
5072 nch.mount != fdp->fd_nrdir.mount)
5074 if (ncp->nc_flag & NCF_DESTROYED) {
5080 * While traversing upwards, if we encounter the root
5081 * of the current mount we have to skip to the mount point
5082 * in the underlying filesystem.
5084 if (ncp == nch.mount->mnt_ncmountpt.ncp) {
5085 nch = nch.mount->mnt_ncmounton;
5094 * Prepend the path segment
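 *
 * bp walks backwards from the end of the buffer; each component is
 * copied in reverse (hence the descending index below) so the finished
 * string reads forward, with a '/' separator evidently placed ahead of
 * each component (slash_prefixed tracks whether one is already present
 * for the final fixup).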
5096 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
5102 *--bp = ncp->nc_name[i];
5113 * Go up a directory. This isn't a mount point so we don't
5114 * have to check again.
5116 while ((nch.ncp = ncp->nc_parent) != NULL) {
5117 if (ncp_shared_lock_disable)
5120 _cache_lock_shared(ncp);
5121 if (nch.ncp != ncp->nc_parent) {
5125 _cache_hold(nch.ncp);
5137 if (!slash_prefixed) {
5153 * Thus begins the fullpath magic.
5155 * The passed nchp is referenced but not locked.
5157 __read_mostly static int disablefullpath;
5158 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
5159 &disablefullpath, 0,
5160 "Disable fullpath lookups");
5163 cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
5164 char **retbuf, char **freebuf, int guess)
5166 struct nchandle fd_nrdir;
5167 struct nchandle nch;
5168 struct namecache *ncp;
5169 struct mount *mp, *new_mp;
5178 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
5179 bp = buf + MAXPATHLEN - 1;
5182 fd_nrdir = *nchbase;
5184 fd_nrdir = p->p_fd->fd_nrdir;
5194 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
5198 * If we are asked to guess the upwards path, we do so whenever
5199 * we encounter an ncp marked as a mountpoint. We try to find
5200 * the actual mountpoint by finding the mountpoint with this ncp.
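 *
 * mount_get_by_nc() presumably maps a mountpoint ncp back to its
 * struct mount; when a mount is identified the walk continues from that
 * mount's mnt_ncmounton, i.e. on the covering filesystem, just as the
 * code below does for the current mount's root.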
5203 if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
5204 new_mp = mount_get_by_nc(ncp);
5207 * While traversing upwards, if we encounter the root
5208 * of the current mount we have to skip to the mount point.
5210 if (ncp == mp->mnt_ncmountpt.ncp) {
5214 nch = new_mp->mnt_ncmounton;
5224 * Prepend the path segment
5226 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
5232 *--bp = ncp->nc_name[i];
5243 * Go up a directory. This isn't a mount point so we don't
5244 * have to check again.
5246 * We can only safely access nc_parent with ncp held locked.
5248 while ((nch.ncp = ncp->nc_parent) != NULL) {
5249 _cache_lock_shared(ncp);
5250 if (nch.ncp != ncp->nc_parent) {
5254 _cache_hold(nch.ncp);
5267 if (!slash_prefixed) {
5285 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
5286 char **freebuf, int guess)
5288 struct namecache *ncp;
5289 struct nchandle nch;
5293 if (disablefullpath)
5299 /* vn is NULL, client wants us to use p->p_textvp */
5301 if ((vn = p->p_textvp) == NULL)
5304 spin_lock_shared(&vn->v_spin);
5305 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
5310 spin_unlock_shared(&vn->v_spin);
5314 spin_unlock_shared(&vn->v_spin);
5317 nch.mount = vn->v_mount;
5318 error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
5324 vfscache_rollup_cpu(struct globaldata *gd)
5326 struct pcpu_ncache *pn;
5329 if (pcpu_ncache == NULL)
5331 pn = &pcpu_ncache[gd->gd_cpuid];
5334 * namecache statistics
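 *
 * Each per-cpu counter is drained with atomic_swap_long(), which takes
 * the accumulated delta and zeroes it in one step, and the delta is
 * then folded into the matching global with atomic_add_long(), so
 * concurrent updates on the owning cpu are not lost.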
5336 if (pn->vfscache_count) {
5337 count = atomic_swap_long(&pn->vfscache_count, 0);
5338 atomic_add_long(&vfscache_count, count);
5340 if (pn->vfscache_leafs) {
5341 count = atomic_swap_long(&pn->vfscache_leafs, 0);
5342 atomic_add_long(&vfscache_leafs, count);
5344 if (pn->vfscache_unres) {
5345 count = atomic_swap_long(&pn->vfscache_unres, 0);
5346 atomic_add_long(&vfscache_unres, count);
5348 if (pn->vfscache_negs) {
5349 count = atomic_swap_long(&pn->vfscache_negs, 0);
5350 atomic_add_long(&vfscache_negs, count);
5354 * hysteresis-based cleanings
5356 if (pn->inv_kid_quick_count) {
5357 count = atomic_swap_long(&pn->inv_kid_quick_count, 0);
5358 atomic_add_long(&inv_kid_quick_count, count);
5360 if (pn->inv_ncp_quick_count) {
5361 count = atomic_swap_long(&pn->inv_ncp_quick_count, 0);
5362 atomic_add_long(&inv_ncp_quick_count, count);
5364 if (pn->clean_pos_count) {
5365 count = atomic_swap_long(&pn->clean_pos_count, 0);
5366 atomic_add_long(&clean_pos_count, count);
5368 if (pn->clean_neg_count) {
5369 count = atomic_swap_long(&pn->clean_neg_count, 0);
5370 atomic_add_long(&clean_neg_count, count);
5373 if (pn->numdefered) {
5374 count = atomic_swap_long(&pn->numdefered, 0);
5375 atomic_add_long(&numdefered, count);