2 * Copyright (c) 2003-2020 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 #include <sys/param.h>
66 #include <sys/systm.h>
68 #include <sys/kernel.h>
69 #include <sys/sysctl.h>
70 #include <sys/mount.h>
71 #include <sys/vnode.h>
72 #include <sys/malloc.h>
73 #include <sys/sysmsg.h>
74 #include <sys/spinlock.h>
76 #include <sys/nlookup.h>
77 #include <sys/filedesc.h>
78 #include <sys/fnv_hash.h>
79 #include <sys/globaldata.h>
80 #include <sys/kern_syscall.h>
81 #include <sys/dirent.h>
84 #include <sys/spinlock2.h>
86 #define MAX_RECURSION_DEPTH 64
89 * Random lookups in the cache are accomplished with a hash table using
90 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock,
91 * but we use the ncp->update counter trick to avoid acquiring any
92 * contestable spin-locks during a lookup.
94 * Negative entries may exist and correspond to resolved namecache
95 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT
96 * will be set if the entry corresponds to a whited-out directory entry
97 * (versus simply not finding the entry at all). pcpu_ncache[n].neg_list
98 * is locked via pcpu_ncache[n].neg_spin;
102 * (1) ncp's typically have at least a nc_refs of 1, and usually 2. One
103 * is applicable to direct lookups via the hash table nchpp or via
104 * nc_list (the two are added or removed together). Removal of the ncp
105 * from the hash table drops this reference. The second is applicable
106 * to vp->v_namecache linkages (or negative list linkages), and removal
107 * of the ncp from these lists drops this reference.
109 * On the 1->0 transition of nc_refs the ncp can no longer be referenced
110 * and must be destroyed. No other thread should have access to it at
111 * this point so it can be safely locked and freed without any deadlock
114 * The 1->0 transition can occur at almost any juncture and so cache_drop()
115 * deals with it directly.
117 * (2) Once the 1->0 transition occurs, the entity that caused the transition
118 * will be responsible for destroying the ncp. The ncp cannot be on any
119 * list or hash at this time, or be held by anyone other than the caller
120 * responsible for the transition.
122 * (3) A ncp must be locked in order to modify it.
124 * (5) ncp locks are ordered, child-to-parent. Child first, then parent.
125 * This may seem backwards but forward-scans use the hash table and thus
126 * can hold the parent unlocked while traversing downward. Deletions,
127 * on the other hand, tend to propagate bottom-up since the ref on the
128 * parent is dropped as the children go away (see the sketch below).
130 * (6) Both parent and child must be locked in order to enter the child onto
131 * the parent's nc_list.
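/*
 * Illustrative sketch only (not part of the implementation): the
 * child-before-parent lock ordering described in rule (5) above, using
 * the public nchandle wrappers.  'child' and 'parent' are hypothetical,
 * already-referenced handles.
 *
 *	struct nchandle child, parent;
 *
 *	cache_lock(&child);		(leaf locked first)
 *	cache_lock(&parent);		(then its parent)
 *	... modify the linkage ...
 *	cache_unlock(&parent);
 *	cache_unlock(&child);
 */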
135 * Structures associated with name caching.
137 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
140 #define NCMOUNT_NUMCACHE (16384) /* power of 2 */
141 #define NCMOUNT_SET (8) /* power of 2 */
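/*
 * Illustrative sketch only: how a hash chain is typically selected from
 * a (parent ncp, name) pair -- compare the fnv_32_buf() usage in
 * cache_rename() further below.  'par', 'name', and 'nlen' are
 * hypothetical locals.
 *
 *	hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);
 *	nchpp = NCHHASH(hash);
 *	spin_lock(&nchpp->spin);
 *	... scan nchpp->list for a matching entry ...
 *	spin_unlock(&nchpp->spin);
 */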
143 MALLOC_DEFINE_OBJ(M_VFSCACHE, sizeof(struct namecache),
144 "namecache", "namecache entries");
145 MALLOC_DEFINE(M_VFSCACHEAUX, "namecachestr", "namecache strings");
147 TAILQ_HEAD(nchash_list, namecache);
150 * Don't cachealign, but at least pad to 32 bytes so entries
151 * don't cross a cache line.
154 struct nchash_list list; /* 16 bytes */
155 struct spinlock spin; /* 8 bytes */
156 long pad01; /* 8 bytes */
159 struct ncmount_cache {
160 struct spinlock spin;
161 struct namecache *ncp;
163 struct mount *mp_target;
171 struct spinlock umount_spin; /* cache_findmount/interlock */
172 struct spinlock neg_spin; /* for neg_list and neg_count */
173 struct namecache_list neg_list;
180 long inv_kid_quick_count;
181 long inv_ncp_quick_count;
182 long clean_pos_count;
183 long clean_neg_count;
186 __read_mostly static struct nchash_head *nchashtbl;
187 __read_mostly static struct pcpu_ncache *pcpu_ncache;
188 static struct ncmount_cache ncmount_cache[NCMOUNT_NUMCACHE];
191 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server
192 * to create the namecache infrastructure leading to a dangling vnode.
194 * 0 Only errors are reported
195 * 1 Successes are reported
196 * 2 Successes + the whole directory scan is reported
197 * 3 Force the directory scan code to run as if the parent vnode did not
198 * have a namecache record, even if it does have one.
200 __read_mostly int ncvp_debug;
201 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
202 "Namecache debug level (0-3)");
204 __read_mostly static u_long nchash; /* size of hash table */
205 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
206 "Size of namecache hash table");
208 __read_mostly static int ncnegflush = 10; /* burst for negative flush */
209 SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
210 "Batch flush negative entries");
212 __read_mostly static int ncposflush = 10; /* burst for positive flush */
213 SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
214 "Batch flush positive entries");
216 __read_mostly static int ncnegfactor = 16; /* ratio of negative entries */
217 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
218 "Ratio of negative namecache entries");
220 __read_mostly static int ncposfactor = 16; /* ratio of unres+leaf entries */
221 SYSCTL_INT(_debug, OID_AUTO, ncposfactor, CTLFLAG_RW, &ncposfactor, 0,
222 "Ratio of unresolved leaf namecache entries");
224 __read_mostly static int nclockwarn; /* warn on locked entries in ticks */
225 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
226 "Warn on locked namecache entries in ticks");
228 __read_mostly static int ncposlimit; /* limit on positive cache entries */
229 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
230 "Limit on the number of positive namecache entries");
232 __read_mostly static int ncp_shared_lock_disable = 0;
233 SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
234 &ncp_shared_lock_disable, 0, "Disable shared namecache locks");
236 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
237 "sizeof(struct vnode)");
238 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
239 "sizeof(struct namecache)");
241 __read_mostly static int ncmount_cache_enable = 1;
242 SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
243 &ncmount_cache_enable, 0, "mount point cache");
245 static __inline void _cache_drop(struct namecache *ncp);
246 static int cache_resolve_mp(struct mount *mp, int adjgen);
247 static int cache_findmount_callback(struct mount *mp, void *data);
248 static void _cache_setunresolved(struct namecache *ncp, int adjgen);
249 static void _cache_cleanneg(long count);
250 static void _cache_cleanpos(long ucount, long xcount);
251 static void _cache_cleandefered(void);
252 static void _cache_unlink(struct namecache *ncp);
255 * The new name cache statistics (these are rolled up globals and not
256 * modified in the critical path, see struct pcpu_ncache).
258 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
259 static long vfscache_negs;
260 SYSCTL_LONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &vfscache_negs, 0,
261 "Number of negative namecache entries");
262 static long vfscache_count;
263 SYSCTL_LONG(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &vfscache_count, 0,
264 "Number of namecaches entries");
265 static long vfscache_leafs;
266 SYSCTL_LONG(_vfs_cache, OID_AUTO, numleafs, CTLFLAG_RD, &vfscache_leafs, 0,
267 "Number of leaf namecaches entries");
268 static long vfscache_unres;
269 SYSCTL_LONG(_vfs_cache, OID_AUTO, numunres, CTLFLAG_RD, &vfscache_unres, 0,
270 "Number of unresolved leaf namecaches entries");
272 static long inv_kid_quick_count;
273 SYSCTL_LONG(_vfs_cache, OID_AUTO, inv_kid_quick_count, CTLFLAG_RD,
274 &inv_kid_quick_count, 0,
275 "quick kid invalidations");
276 static long inv_ncp_quick_count;
277 SYSCTL_LONG(_vfs_cache, OID_AUTO, inv_ncp_quick_count, CTLFLAG_RD,
278 &inv_ncp_quick_count, 0,
279 "quick ncp invalidations");
280 static long clean_pos_count;
281 SYSCTL_LONG(_vfs_cache, OID_AUTO, clean_pos_count, CTLFLAG_RD,
283 "positive ncp cleanings");
284 static long clean_neg_count;
285 SYSCTL_LONG(_vfs_cache, OID_AUTO, clean_neg_count, CTLFLAG_RD,
287 "negative ncp cleanings");
289 static long numdefered;
290 SYSCTL_LONG(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
291 "Number of cache entries allocated");
294 * Returns the number of basic references expected on the ncp, not
295 * including any children. 1 for the natural ref, and an additional ref
296 * if the ncp is resolved (representing a positive or negative hit).
299 ncpbaserefs(struct namecache *ncp)
301 return (1 + ((ncp->nc_flag & NCF_UNRESOLVED) == 0));
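/*
 * Worked example (sketch): an unresolved ncp is expected to carry
 * nc_refs == 1 (just the natural ref) while a resolved ncp, positive or
 * negative, is expected to carry nc_refs == 2.  cache_inval_vp_quick()
 * below compares against this baseline plus the ref it holds itself,
 * e.g. "ncp->nc_refs != 1 + ncpbaserefs(ncp)".
 */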
304 struct nchstats nchstats[SMP_MAXCPU];
306 * Export VFS cache effectiveness statistics to user-land.
308 * The statistics are left for aggregation to user-land so
309 * neat things can be achieved, like observing per-CPU cache
313 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
315 struct globaldata *gd;
319 for (i = 0; i < ncpus; ++i) {
320 gd = globaldata_find(i);
321 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
322 sizeof(struct nchstats))))
328 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
329 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
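/*
 * Illustrative userland sketch only (not kernel code): the handler above
 * exports one struct nchstats per cpu, leaving aggregation to the
 * consumer.  The field name ncs_goodhits is an assumption based on the
 * exported structure; adjust to the actual header.
 *
 *	struct nchstats *st;
 *	size_t len = 0;
 *	long goodhits = 0;
 *	int n, ncpu;
 *
 *	sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0);
 *	st = malloc(len);
 *	sysctlbyname("vfs.cache.nchstats", st, &len, NULL, 0);
 *	ncpu = len / sizeof(*st);
 *	for (n = 0; n < ncpu; ++n)
 *		goodhits += st[n].ncs_goodhits;
 */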
331 static int cache_zap(struct namecache *ncp);
334 * Cache mount points and namecache records in order to avoid unnecessary
335 * atomic ops on mnt_refs and ncp->refs. This improves concurrent SMP
336 * performance and is particularly important on multi-socket systems to
337 * reduce cache-line ping-ponging.
339 * Try to keep the pcpu structure within one cache line (~64 bytes).
341 #define MNTCACHE_COUNT 32 /* power of 2, multiple of SET */
342 #define MNTCACHE_SET 8 /* set associativity */
344 struct mntcache_elm {
345 struct namecache *ncp;
352 struct mntcache_elm array[MNTCACHE_COUNT];
355 static struct mntcache pcpu_mntcache[MAXCPU];
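/*
 * Worked example (sketch) of the set-associative indexing used by the
 * per-cpu mount cache above: with MNTCACHE_COUNT = 32 and
 * MNTCACHE_SET = 8 there are 4 sets of 8 elements.  A hash value is
 * first reduced modulo MNTCACHE_COUNT and then rounded down to the base
 * of its set, which the lookup code scans linearly:
 *
 *	hv  = hv & (MNTCACHE_COUNT - 1);		hv is now 0..31
 *	elm = &cache->array[hv & ~(MNTCACHE_SET - 1)];	base 0, 8, 16 or 24
 *	(the set is elm[0] through elm[MNTCACHE_SET - 1])
 */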
359 _cache_ncp_gen_enter(struct namecache *ncp)
361 ncp->nc_generation += 2;
367 _cache_ncp_gen_exit(struct namecache *ncp)
370 ncp->nc_generation += 2;
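/*
 * Illustrative sketch only: the general shape of the update-counter
 * (seqlock-style) read that the header comment refers to.  A lockless
 * lookup samples nc_generation before and after reading the fields it
 * needs and retries (or falls back to a locked path) if a concurrent
 * modification bumped the counter; the real lookup code layers further
 * checks on top of this.
 *
 *	do {
 *		gen = ncp->nc_generation;
 *		cpu_ccfence();
 *		... read nc_parent, nc_name, nc_flag, etc ...
 *		cpu_ccfence();
 *	} while (ncp->nc_generation != gen);
 */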
375 struct mntcache_elm *
376 _cache_mntcache_hash(void *ptr)
378 struct mntcache_elm *elm;
381 hv = iscsi_crc32(&ptr, sizeof(ptr)) & (MNTCACHE_COUNT - 1);
382 elm = &pcpu_mntcache[mycpu->gd_cpuid].array[hv & ~(MNTCACHE_SET - 1)];
389 _cache_mntref(struct mount *mp)
391 struct mntcache_elm *elm;
395 elm = _cache_mntcache_hash(mp);
396 for (i = 0; i < MNTCACHE_SET; ++i) {
398 mpr = atomic_swap_ptr((void *)&elm->mp, NULL);
399 if (__predict_true(mpr == mp))
402 atomic_add_int(&mpr->mnt_refs, -1);
406 atomic_add_int(&mp->mnt_refs, 1);
411 _cache_mntrel(struct mount *mp)
413 struct mntcache_elm *elm;
414 struct mntcache_elm *best;
420 elm = _cache_mntcache_hash(mp);
422 for (i = 0; i < MNTCACHE_SET; ++i) {
423 if (elm->mp == NULL) {
424 mpr = atomic_swap_ptr((void *)&elm->mp, mp);
425 if (__predict_false(mpr != NULL)) {
426 atomic_add_int(&mpr->mnt_refs, -1);
431 delta1 = ticks - best->ticks;
432 delta2 = ticks - elm->ticks;
433 if (delta2 > delta1 || delta1 < -1 || delta2 < -1)
437 mpr = atomic_swap_ptr((void *)&best->mp, mp);
440 atomic_add_int(&mpr->mnt_refs, -1);
444 * Clears all cached mount points on all cpus. This routine should only
445 * be called when we are waiting for a mount to clear, e.g. so we can
449 cache_clearmntcache(struct mount *target __unused)
453 for (n = 0; n < ncpus; ++n) {
454 struct mntcache *cache = &pcpu_mntcache[n];
455 struct mntcache_elm *elm;
456 struct namecache *ncp;
460 for (i = 0; i < MNTCACHE_COUNT; ++i) {
461 elm = &cache->array[i];
463 mp = atomic_swap_ptr((void *)&elm->mp, NULL);
465 atomic_add_int(&mp->mnt_refs, -1);
468 ncp = atomic_swap_ptr((void *)&elm->ncp, NULL);
477 * Namespace locking. The caller must already hold a reference to the
478 * namecache structure in order to lock/unlock it. The controlling entity
479 * in a 1->0 transition does not need to lock the ncp to dispose of it,
480 * as nobody else will have visibility to it at that point.
482 * Note that holding a locked namecache structure prevents other threads
483 * from making namespace changes (e.g. deleting or creating), prevents
484 * vnode association state changes by other threads, and prevents the
485 * namecache entry from being resolved or unresolved by other threads.
487 * An exclusive lock owner has full authority to associate/disassociate
488 * vnodes and resolve/unresolve the locked ncp.
490 * A shared lock owner only has authority to acquire the underlying vnode,
493 * The primary lock field is nc_lockstatus. nc_locktd is set after the
494 * fact (when locking) or cleared prior to unlocking.
496 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
497 * or recycled, but it does NOT help you if the vnode had already
498 * initiated a recyclement. If this is important, use cache_get()
499 * rather than cache_lock() (and deal with the differences in the
500 * way the refs counter is handled). Or, alternatively, make an
501 * unconditional call to cache_validate() or cache_resolve()
502 * after cache_lock() returns.
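/*
 * Illustrative sketch only of the two alternatives described in the
 * WARNING above.  'nch' is a hypothetical, already-referenced nchandle
 * and 'cred' a hypothetical credential.
 *
 *	Either take the lock via cache_get(), which leaves a definitively
 *	usable vnode or a definitively unresolved ncp:
 *
 *		cache_get(&nch, &nch);
 *		...
 *		cache_put(&nch);
 *
 *	Or lock it directly and revalidate unconditionally:
 *
 *		cache_lock(&nch);
 *		cache_resolve(&nch, cred);
 *		...
 *		cache_unlock(&nch);
 */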
506 _cache_lock(struct namecache *ncp)
511 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE);
512 while (__predict_false(error == EWOULDBLOCK)) {
514 didwarn = ticks - nclockwarn;
515 kprintf("[diagnostic] cache_lock: "
518 curthread->td_comm, ncp,
519 ncp->nc_nlen, ncp->nc_nlen,
522 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_TIMELOCK);
524 if (__predict_false(didwarn)) {
525 kprintf("[diagnostic] cache_lock: "
526 "%s unblocked %*.*s after %d secs\n",
528 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
529 (int)(ticks - didwarn) / hz);
534 * Release a previously acquired lock.
536 * A concurrent shared-lock acquisition or acquisition/release can
537 * race bit 31 so only drop the ncp if bit 31 was set.
541 _cache_unlock(struct namecache *ncp)
543 lockmgr(&ncp->nc_lock, LK_RELEASE);
547 * Lock ncp exclusively, non-blocking. Return 0 on success.
551 _cache_lock_nonblock(struct namecache *ncp)
555 error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_NOWAIT);
556 if (__predict_false(error != 0)) {
563 * This is a special form of _cache_lock() which only succeeds if
564 * it can get a pristine, non-recursive lock. The caller must have
565 * already ref'd the ncp.
567 * On success the ncp will be locked, on failure it will not. The
568 * ref count does not change either way.
570 * We want _cache_lock_special() (on success) to return a definitively
571 * usable vnode or a definitively unresolved ncp.
575 _cache_lock_special(struct namecache *ncp)
577 if (_cache_lock_nonblock(ncp) == 0) {
578 if (lockmgr_oneexcl(&ncp->nc_lock)) {
579 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
580 _cache_setunresolved(ncp, 1);
589 * Shared lock, guarantees vp held
591 * The shared lock holds vp on the 0->1 transition. It is possible to race
592 * another shared lock release, preventing the other release from dropping
593 * the vnode and clearing bit 31.
595 * If it is not set then we are responsible for setting it, and this
596 * responsibility does not race with anyone else.
600 _cache_lock_shared(struct namecache *ncp)
605 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK);
606 while (__predict_false(error == EWOULDBLOCK)) {
608 didwarn = ticks - nclockwarn;
609 kprintf("[diagnostic] cache_lock_shared: "
612 curthread->td_comm, ncp,
613 ncp->nc_nlen, ncp->nc_nlen,
616 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK);
618 if (__predict_false(didwarn)) {
619 kprintf("[diagnostic] cache_lock_shared: "
620 "%s unblocked %*.*s after %d secs\n",
622 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
623 (int)(ticks - didwarn) / hz);
628 * Shared lock, guarantees vp held. Non-blocking. Returns 0 on success
632 _cache_lock_shared_nonblock(struct namecache *ncp)
636 error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_NOWAIT);
637 if (__predict_false(error != 0)) {
644 * This function tries to get a shared lock but will back-off to an
647 * (1) Some other thread is trying to obtain an exclusive lock
648 * (to prevent the exclusive requester from getting livelocked out
649 * by many shared locks).
651 * (2) The current thread already owns an exclusive lock (to avoid
654 * WARNING! On machines with lots of cores we really want to try hard to
655 * get a shared lock or concurrent path lookups can chain-react
656 * into a very high-latency exclusive lock.
658 * This is very evident in dsynth's initial scans.
662 _cache_lock_shared_special(struct namecache *ncp)
665 * Only honor a successful shared lock (returning 0) if there is
666 * no exclusive request pending and the vnode, if present, is not
667 * in a reclaimed state.
669 if (_cache_lock_shared_nonblock(ncp) == 0) {
670 if (__predict_true(!lockmgr_exclpending(&ncp->nc_lock))) {
671 if (ncp->nc_vp == NULL ||
672 (ncp->nc_vp->v_flag & VRECLAIMED) == 0) {
681 * Non-blocking shared lock failed. If we already own the exclusive
682 * lock just acquire another exclusive lock (instead of deadlocking).
683 * Otherwise acquire a shared lock.
685 if (lockstatus(&ncp->nc_lock, curthread) == LK_EXCLUSIVE) {
689 _cache_lock_shared(ncp);
697 * (v) LK_SHARED or LK_EXCLUSIVE
701 _cache_lockstatus(struct namecache *ncp)
705 status = lockstatus(&ncp->nc_lock, curthread);
706 if (status == LK_EXCLOTHER)
712 * cache_hold() and cache_drop() prevent the premature deletion of a
713 * namecache entry but do not prevent operations (such as zapping) on
714 * that namecache entry.
716 * This routine may only be called from outside this source module if
717 * nc_refs is already deterministically at least 1, such as being
718 * associated with e.g. a process, file descriptor, or some other entity.
720 * Only the above situations, similar situations within this module where
721 * the ref count is deterministically at least 1, or when the ncp is found
722 * via the nchpp (hash table) lookup, can bump nc_refs.
724 * Very specifically, a ncp found via nc_list CANNOT bump nc_refs. It
725 * can still be removed from the nc_list, however, as long as the caller
726 * can acquire its lock (in the wrong order).
728 * This is a rare case where callers are allowed to hold a spinlock,
729 * so we can't use one ourselves.
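/*
 * Illustrative sketch only: a caller outside this file may add its own
 * ref only when the handle is already deterministically referenced,
 * e.g. a process's current-directory handle (fd_ncdir is used here
 * purely for illustration).
 *
 *	struct nchandle nch;
 *
 *	nch = curproc->p_fd->fd_ncdir;	(held by the fd table)
 *	cache_hold(&nch);		(our own ref on top of that)
 *	...
 *	cache_drop(&nch);
 */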
733 _cache_hold(struct namecache *ncp)
735 KKASSERT(ncp->nc_refs > 0);
736 atomic_add_int(&ncp->nc_refs, 1);
742 * Drop a cache entry.
744 * The 1->0 transition can only occur after or because the natural ref
745 * is being dropped. If another thread had a temporary ref during the
746 * ncp's destruction, then that other thread might wind up being the
747 * one to drop the last ref.
751 _cache_drop(struct namecache *ncp)
753 if (atomic_fetchadd_int(&ncp->nc_refs, -1) == 1) {
754 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
759 ncp->nc_refs = -1; /* safety */
761 kfree(ncp->nc_name, M_VFSCACHEAUX);
762 kfree_obj(ncp, M_VFSCACHE);
767 * Link a new namecache entry to its parent and to the hash table. Be
768 * careful to avoid races if vhold() blocks in the future.
770 * Both ncp and par must be referenced and locked. The reference is
771 * transferred to the nchpp (and, most notably, NOT to the parent list).
773 * NOTE: The hash table spinlock is held across this call, we can't do
777 _cache_link_parent(struct namecache *ncp, struct namecache *par,
778 struct nchash_head *nchpp)
780 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
782 KKASSERT(ncp->nc_parent == NULL);
783 _cache_ncp_gen_enter(ncp);
784 ncp->nc_parent = par;
785 ncp->nc_head = nchpp;
788 * Set inheritance flags. Note that the parent flags may be
789 * stale due to getattr potentially not having been run yet
790 * (it gets run during nlookup()'s).
792 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
793 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
794 ncp->nc_flag |= NCF_SF_PNOCACHE;
795 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
796 ncp->nc_flag |= NCF_UF_PCACHE;
799 * Add to hash table and parent, adjust accounting
801 TAILQ_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
802 atomic_add_long(&pn->vfscache_count, 1);
805 * ncp is a new leaf being added to the tree
807 if (TAILQ_EMPTY(&ncp->nc_list)) {
808 atomic_add_long(&pn->vfscache_leafs, 1);
809 if (ncp->nc_flag & NCF_UNRESOLVED)
810 atomic_add_long(&pn->vfscache_unres, 1);
813 if (TAILQ_EMPTY(&par->nc_list)) {
815 * Parent was, but now is no longer a leaf
817 _cache_ncp_gen_enter(par);
818 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
819 if (par->nc_flag & NCF_UNRESOLVED)
820 atomic_add_long(&pn->vfscache_unres, -1);
821 atomic_add_long(&pn->vfscache_leafs, -1);
824 * Any vp associated with an ncp which has children must
825 * be held to prevent it from being recycled.
829 _cache_ncp_gen_exit(par);
831 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
833 _cache_hold(par); /* add nc_parent ref */
834 _cache_ncp_gen_exit(ncp);
838 * Remove the parent and hash associations from a namecache structure.
839 * Drop the ref-count on the parent. The caller receives the ref
840 * from the ncp's nchpp linkage that was removed and may forward that
841 * ref to a new linkage.
843 * The caller usually holds an additional ref on the ncp so the unlink
844 * cannot be the final drop. XXX should not be necessary now since the
845 * caller receives the ref from the nchpp linkage, assuming the ncp
846 * was linked in the first place.
848 * ncp must be locked, which means that there won't be any nc_parent
849 * removal races. This routine will acquire a temporary lock on
850 * the parent as well as the appropriate hash chain.
852 * par must be locked and will remain locked on return.
854 * nchpp must be spin-locked. This routine eats the spin-lock.
857 _cache_unlink_parent(struct namecache *par, struct namecache *ncp,
858 struct nchash_head *nchpp)
860 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
861 struct vnode *dropvp;
863 KKASSERT(ncp->nc_parent == par);
865 _cache_ncp_gen_enter(ncp);
867 /* don't add a ref, we drop the nchpp ref later */
870 * Remove from hash table and parent, adjust accounting
872 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash);
873 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
874 atomic_add_long(&pn->vfscache_count, -1);
877 * Removing leaf from tree
879 if (TAILQ_EMPTY(&ncp->nc_list)) {
880 if (ncp->nc_flag & NCF_UNRESOLVED)
881 atomic_add_long(&pn->vfscache_unres, -1);
882 atomic_add_long(&pn->vfscache_leafs, -1);
886 * Parent is now a leaf?
889 if (TAILQ_EMPTY(&par->nc_list)) {
890 _cache_ncp_gen_enter(par);
891 if (par->nc_flag & NCF_UNRESOLVED)
892 atomic_add_long(&pn->vfscache_unres, 1);
893 atomic_add_long(&pn->vfscache_leafs, 1);
896 _cache_ncp_gen_exit(par);
898 ncp->nc_parent = NULL;
900 spin_unlock(&nchpp->spin);
901 _cache_drop(par); /* drop ncp's nc_parent ref from (par) */
904 * We can only safely vdrop with no spinlocks held.
908 _cache_ncp_gen_exit(ncp);
912 * Allocate a new namecache structure. Most of the code does not require
913 * zero-termination of the string but it makes vop_compat_ncreate() easier.
915 * The returned ncp will be locked and referenced. The ref is generally meant
916 * to be transfered to the nchpp linkage.
918 static struct namecache *
919 cache_alloc(int nlen)
921 struct namecache *ncp;
923 ncp = kmalloc_obj(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
925 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHEAUX, M_WAITOK);
927 ncp->nc_flag = NCF_UNRESOLVED;
928 ncp->nc_error = ENOTCONN; /* needs to be resolved */
929 ncp->nc_refs = 1; /* natural ref */
930 ncp->nc_generation = 0; /* link/unlink/res/unres op */
931 TAILQ_INIT(&ncp->nc_list);
932 lockinit(&ncp->nc_lock, "ncplk", hz, LK_CANRECURSE);
933 lockmgr(&ncp->nc_lock, LK_EXCLUSIVE);
939 * Can only be called for the case where the ncp has never been
940 * associated with anything (so no spinlocks are needed).
943 _cache_free(struct namecache *ncp)
945 KKASSERT(ncp->nc_refs == 1);
947 kfree(ncp->nc_name, M_VFSCACHEAUX);
948 kfree_obj(ncp, M_VFSCACHE);
952 * [re]initialize a nchandle.
955 cache_zero(struct nchandle *nch)
962 * Ref and deref a nchandle structure (ncp + mp)
964 * The caller must specify a stable ncp pointer, typically meaning the
965 * ncp is already referenced but this can also occur indirectly through
966 * e.g. holding a lock on a direct child.
968 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
969 * use read spinlocks here.
972 cache_hold(struct nchandle *nch)
974 _cache_hold(nch->ncp);
975 _cache_mntref(nch->mount);
980 * Create a copy of a namecache handle for an already-referenced
984 cache_copy(struct nchandle *nch, struct nchandle *target)
986 struct namecache *ncp;
988 struct mntcache_elm *elm;
989 struct namecache *ncpr;
997 elm = _cache_mntcache_hash(ncp);
998 for (i = 0; i < MNTCACHE_SET; ++i) {
999 if (elm->ncp == ncp) {
1000 ncpr = atomic_swap_ptr((void *)&elm->ncp, NULL);
1016 * Drop the nchandle, but try to cache the ref to avoid global atomic
1017 * ops. This is typically done on the system root and jail root nchandles.
1020 cache_drop_and_cache(struct nchandle *nch, int elmno)
1022 struct mntcache_elm *elm;
1023 struct mntcache_elm *best;
1024 struct namecache *ncpr;
1031 _cache_drop(nch->ncp);
1035 _cache_mntrel(nch->mount);
1041 elm = _cache_mntcache_hash(nch->ncp);
1043 for (i = 0; i < MNTCACHE_SET; ++i) {
1044 if (elm->ncp == NULL) {
1045 ncpr = atomic_swap_ptr((void *)&elm->ncp, nch->ncp);
1046 _cache_mntrel(nch->mount);
1054 delta1 = ticks - best->ticks;
1055 delta2 = ticks - elm->ticks;
1056 if (delta2 > delta1 || delta1 < -1 || delta2 < -1)
1060 ncpr = atomic_swap_ptr((void *)&best->ncp, nch->ncp);
1061 _cache_mntrel(nch->mount);
1062 best->ticks = ticks;
1070 cache_changemount(struct nchandle *nch, struct mount *mp)
1073 _cache_mntrel(nch->mount);
1078 cache_drop(struct nchandle *nch)
1080 _cache_mntrel(nch->mount);
1081 _cache_drop(nch->ncp);
1088 * -1 Locked by other
1090 * (v) LK_SHARED or LK_EXCLUSIVE
1093 cache_lockstatus(struct nchandle *nch)
1095 return(_cache_lockstatus(nch->ncp));
1099 cache_lock(struct nchandle *nch)
1101 _cache_lock(nch->ncp);
1105 * Returns a shared or exclusive-locked ncp. The ncp will only be
1106 * shared-locked if it is already resolved.
1109 cache_lock_maybe_shared(struct nchandle *nch, int excl)
1111 struct namecache *ncp = nch->ncp;
1113 if (ncp_shared_lock_disable || excl ||
1114 (ncp->nc_flag & NCF_UNRESOLVED)) {
1117 _cache_lock_shared(ncp);
1118 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1119 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
1131 * Lock fncpd, fncp, tncpd, and tncp. tncp is already locked but may
1132 * have to be cycled to avoid deadlocks. Make sure all four are resolved.
1134 * The caller is responsible for checking the validity upon return as
1135 * the records may have been flagged DESTROYED in the interim.
1137 * Namecache lock ordering is leaf first, then parent. However, complex
1138 * interactions may occur between the source and target because there is
1139 * no ordering guarantee between (fncpd, fncp) and (tncpd, tncp).
1142 cache_lock4_tondlocked(struct nchandle *fncpd, struct nchandle *fncp,
1143 struct nchandle *tncpd, struct nchandle *tncp,
1144 struct ucred *fcred, struct ucred *tcred)
1149 * Lock tncp and tncpd
1151 * NOTE: Because these ncps are not locked to begin with, it is
1152 * possible for other rename races to cause the normal lock
1153 * order assumptions to fail.
1155 * NOTE: Lock ordering assumptions are valid if a leaf's parent
1156 * matches after the leaf has been locked. However, ordering
1157 * between the 'from' and the 'to' is not, and an overlapping
1158 * lock order reversal is still possible.
1161 if (__predict_false(tlocked == 0)) {
1164 if (__predict_false(cache_lock_nonblock(tncpd) != 0)) {
1166 cache_lock(tncpd); /* cycle tncpd lock */
1167 cache_unlock(tncpd);
1173 * Lock fncp and fncpd
1175 * NOTE: Because these ncps are not locked to begin with, it is
1176 * possible for other rename races to cause the normal lock
1177 * order assumptions to fail.
1179 * NOTE: Lock ordering assumptions are valid if a leaf's parent
1180 * matches after the leaf has been locked. However, ordering
1181 * between the 'from' and the 'to' is not, and an overlapping
1182 * lock order reversal is still possible.
1184 if (__predict_false(cache_lock_nonblock(fncp) != 0)) {
1185 cache_unlock(tncpd);
1187 cache_lock(fncp); /* cycle fncp lock */
1193 if (__predict_false(cache_lock_nonblock(fncpd) != 0)) {
1195 cache_unlock(tncpd);
1198 cache_unlock(fncpd); /* cycle fncpd lock */
1203 if (__predict_true((fncpd->ncp->nc_flag & NCF_DESTROYED) == 0))
1204 cache_resolve(fncpd, fcred);
1205 if (__predict_true((tncpd->ncp->nc_flag & NCF_DESTROYED) == 0))
1206 cache_resolve(tncpd, tcred);
1207 if (__predict_true((fncp->ncp->nc_flag & NCF_DESTROYED) == 0))
1208 cache_resolve(fncp, fcred);
1209 if (__predict_true((tncp->ncp->nc_flag & NCF_DESTROYED) == 0))
1210 cache_resolve(tncp, tcred);
1214 cache_lock_nonblock(struct nchandle *nch)
1216 return(_cache_lock_nonblock(nch->ncp));
1220 cache_unlock(struct nchandle *nch)
1222 _cache_unlock(nch->ncp);
1226 * ref-and-lock, unlock-and-deref functions.
1228 * This function is primarily used by nlookup. Even though cache_lock
1229 * holds the vnode, it is possible that the vnode may have already
1230 * initiated a recyclement.
1232 * We want cache_get() to return a definitively usable vnode or a
1233 * definitively unresolved ncp.
1237 _cache_get(struct namecache *ncp)
1241 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1242 _cache_setunresolved(ncp, 1);
1247 * Attempt to obtain a shared lock on the ncp. A shared lock will only
1248 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is
1249 * valid. Otherwise an exclusive lock will be acquired instead.
1253 _cache_get_maybe_shared(struct namecache *ncp, int excl)
1255 if (ncp_shared_lock_disable || excl ||
1256 (ncp->nc_flag & NCF_UNRESOLVED))
1258 return(_cache_get(ncp));
1261 _cache_lock_shared(ncp);
1262 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1263 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
1265 ncp = _cache_get(ncp);
1270 ncp = _cache_get(ncp);
1277 * NOTE: The same nchandle can be passed for both arguments.
1280 cache_get(struct nchandle *nch, struct nchandle *target)
1282 KKASSERT(nch->ncp->nc_refs > 0);
1283 target->mount = nch->mount;
1284 target->ncp = _cache_get(nch->ncp);
1285 _cache_mntref(target->mount);
1289 cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl)
1291 KKASSERT(nch->ncp->nc_refs > 0);
1292 target->mount = nch->mount;
1293 target->ncp = _cache_get_maybe_shared(nch->ncp, excl);
1294 _cache_mntref(target->mount);
1298 * Release a held and locked ncp
1302 _cache_put(struct namecache *ncp)
1309 cache_put(struct nchandle *nch)
1311 _cache_mntrel(nch->mount);
1312 _cache_put(nch->ncp);
1318 * Resolve an unresolved ncp by associating a vnode with it. If the
1319 * vnode is NULL, a negative cache entry is created.
1321 * The ncp should be locked on entry and will remain locked on return.
1325 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp,
1328 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
1330 KKASSERT((ncp->nc_flag & NCF_UNRESOLVED) &&
1331 (_cache_lockstatus(ncp) == LK_EXCLUSIVE) &&
1332 ncp->nc_vp == NULL);
1335 _cache_ncp_gen_enter(ncp);
1339 * Any vp associated with an ncp which has children must
1340 * be held. Any vp associated with a locked ncp must be held.
1342 if (!TAILQ_EMPTY(&ncp->nc_list))
1344 spin_lock(&vp->v_spin);
1346 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
1347 ++vp->v_namecache_count;
1348 _cache_hold(ncp); /* v_namecache assoc */
1349 spin_unlock(&vp->v_spin);
1350 vhold(vp); /* nc_vp */
1353 * Set auxiliary flags
1355 switch(vp->v_type) {
1357 ncp->nc_flag |= NCF_ISDIR;
1360 ncp->nc_flag |= NCF_ISSYMLINK;
1361 /* XXX cache the contents of the symlink */
1370 * XXX: this is a hack to work-around the lack of a real pfs vfs
1374 if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
1379 * When creating a negative cache hit we set the
1380 * namecache_gen. A later resolve will clean out the
1381 * negative cache hit if the mount point's namecache_gen
1382 * has changed. Used by devfs, could also be used by
1386 ncp->nc_negcpu = mycpu->gd_cpuid;
1387 spin_lock(&pn->neg_spin);
1388 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
1389 _cache_hold(ncp); /* neg_list assoc */
1391 spin_unlock(&pn->neg_spin);
1392 atomic_add_long(&pn->vfscache_negs, 1);
1394 ncp->nc_error = ENOENT;
1396 VFS_NCPGEN_SET(mp, ncp);
1400 * Previously unresolved leaf is now resolved.
1402 * Clear the NCF_UNRESOLVED flag last (see cache_nlookup_nonlocked()).
1403 * We only adjust vfscache_unres for ncp's that are in the tree.
1405 if (TAILQ_EMPTY(&ncp->nc_list) && ncp->nc_parent)
1406 atomic_add_long(&pn->vfscache_unres, -1);
1407 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
1409 _cache_ncp_gen_exit(ncp);
1413 cache_setvp(struct nchandle *nch, struct vnode *vp)
1415 _cache_setvp(nch->mount, nch->ncp, vp, 1);
1422 cache_settimeout(struct nchandle *nch, int nticks)
1424 struct namecache *ncp = nch->ncp;
1426 if ((ncp->nc_timeout = ticks + nticks) == 0)
1427 ncp->nc_timeout = 1;
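/*
 * Illustrative sketch only: a VFS that wants resolved entries to expire
 * can bound their lifetime right after resolving them; the 5 seconds
 * used here is a purely hypothetical value.
 *
 *	cache_setvp(&nch, vp);
 *	cache_settimeout(&nch, 5 * hz);
 */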
1431 * Disassociate the vnode or negative-cache association and mark a
1432 * namecache entry as unresolved again. Note that the ncp is still
1433 * left in the hash table and still linked to its parent.
1435 * The ncp should be locked and refd on entry and will remain locked and refd
1438 * This routine is normally never called on a directory containing children.
1439 * However, NFS often does just that in its rename() code as a cop-out to
1440 * avoid complex namespace operations. This disconnects a directory vnode
1441 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
1447 _cache_setunresolved(struct namecache *ncp, int adjgen)
1451 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1452 struct pcpu_ncache *pn;
1455 _cache_ncp_gen_enter(ncp);
1458 * Is a resolved or destroyed leaf now becoming unresolved?
1459 * Only adjust vfscache_unres for linked ncp's.
1461 if (TAILQ_EMPTY(&ncp->nc_list) && ncp->nc_parent) {
1462 pn = &pcpu_ncache[mycpu->gd_cpuid];
1463 atomic_add_long(&pn->vfscache_unres, 1);
1466 ncp->nc_flag |= NCF_UNRESOLVED;
1467 ncp->nc_timeout = 0;
1468 ncp->nc_error = ENOTCONN;
1469 if ((vp = ncp->nc_vp) != NULL) {
1470 spin_lock(&vp->v_spin);
1472 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
1473 --vp->v_namecache_count;
1474 spin_unlock(&vp->v_spin);
1477 * Any vp associated with an ncp with children is
1478 * held by that ncp. Any vp associated with a locked ncp
1479 * is held by that ncp. These conditions must be
1480 * undone when the vp is cleared out from the ncp.
1482 if (!TAILQ_EMPTY(&ncp->nc_list))
1486 pn = &pcpu_ncache[ncp->nc_negcpu];
1488 atomic_add_long(&pn->vfscache_negs, -1);
1489 spin_lock(&pn->neg_spin);
1490 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
1492 spin_unlock(&pn->neg_spin);
1494 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
1497 _cache_ncp_gen_exit(ncp);
1498 _cache_drop(ncp); /* from v_namecache or neg_list */
1503 * The cache_nresolve() code calls this function to automatically
1504 * set a resolved cache element to unresolved if it has timed out
1505 * or if it is a negative cache hit and the mount point namecache_gen
1509 _cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp)
1512 * Try to zap entries that have timed out. We have
1513 * to be careful here because locked leafs may depend
1514 * on the vnode remaining intact in a parent, so only
1515 * do this under very specific conditions.
1517 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
1518 TAILQ_EMPTY(&ncp->nc_list)) {
1523 * If a resolved negative cache hit is invalid due to
1524 * the mount's namecache generation being bumped, zap it.
1526 if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
1531 * Otherwise we are good
1536 static __inline void
1537 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
1540 * Already in an unresolved state, nothing to do.
1542 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1543 if (_cache_auto_unresolve_test(mp, ncp))
1544 _cache_setunresolved(ncp, 1);
1549 cache_setunresolved(struct nchandle *nch)
1551 _cache_setunresolved(nch->ncp, 1);
1555 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
1556 * looking for matches. This flag tells the lookup code when it must
1557 * check for a mount linkage and also prevents the directories in question
1558 * from being deleted or renamed.
1562 cache_clrmountpt_callback(struct mount *mp, void *data)
1564 struct nchandle *nch = data;
1566 if (mp->mnt_ncmounton.ncp == nch->ncp)
1568 if (mp->mnt_ncmountpt.ncp == nch->ncp)
1574 * Clear NCF_ISMOUNTPT on nch->ncp if it is no longer associated
1575 * with a mount point.
1578 cache_clrmountpt(struct nchandle *nch)
1582 count = mountlist_scan(cache_clrmountpt_callback, nch,
1583 MNTSCAN_FORWARD | MNTSCAN_NOBUSY |
1586 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1590 * Invalidate portions of the namecache topology given a starting entry.
1591 * The passed ncp is set to an unresolved state and:
1593 * The passed ncp must be referenced and locked. The routine may unlock
1594 * and relock ncp several times, and will recheck the children and loop
1595 * to catch races. When done the passed ncp will be returned with the
1596 * reference and lock intact.
1598 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
1599 * that the physical underlying nodes have been
1600 * destroyed... as in deleted. For example, when
1601 * a directory is removed. This will cause record
1602 * lookups on the name to no longer be able to find
1603 * the record and tells the resolver to return failure
1604 * rather than trying to resolve through the parent.
1606 * The topology itself, including ncp->nc_name,
1609 * This only applies to the passed ncp, if CINV_CHILDREN
1610 * is specified the children are not flagged.
1612 * CINV_CHILDREN - Set all children (recursively) to an unresolved
1615 * Note that this will also have the side effect of
1616 * cleaning out any unreferenced nodes in the topology
1617 * from the leaves up as the recursion backs out.
1619 * Note that the topology for any referenced nodes remains intact, but
1620 * the nodes will be marked as having been destroyed and will be set
1621 * to an unresolved state.
1623 * It is possible for cache_inval() to race a cache_resolve(), meaning that
1624 * the namecache entry may not actually be invalidated on return if it was
1625 * revalidated while recursing down into its children. This code guarantees
1626 * that the node(s) will go through an invalidation cycle, but does not
1627 * guarantee that they will remain in an invalidated state.
1629 * Returns non-zero if a revalidation was detected during the invalidation
1630 * recursion, zero otherwise. Note that since only the original ncp is
1631 * locked the revalidation ultimately can only indicate that the original ncp
1632 * *MIGHT* have been re-resolved.
1634 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1635 * have to avoid blowing out the kernel stack. We do this by saving the
1636 * deep namecache node and aborting the recursion, then re-recursing at that
1637 * node using a depth-first algorithm in order to allow multiple deep
1638 * recursions to chain through each other, then we restart the invalidation
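/*
 * Illustrative sketch only: a typical full invalidation of a directory
 * namespace that has been removed, via the public wrapper below.
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 */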
1643 struct namecache *resume_ncp;
1647 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
1651 _cache_inval(struct namecache *ncp, int flags)
1653 struct cinvtrack track;
1654 struct namecache *ncp2;
1658 track.resume_ncp = NULL;
1661 r = _cache_inval_internal(ncp, flags, &track);
1662 if (track.resume_ncp == NULL)
1665 while ((ncp2 = track.resume_ncp) != NULL) {
1666 track.resume_ncp = NULL;
1668 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
1670 /*_cache_put(ncp2);*/
1679 cache_inval(struct nchandle *nch, int flags)
1681 return(_cache_inval(nch->ncp, flags));
1685 * Helper for _cache_inval(). The passed ncp is refd and locked and
1686 * remains that way on return, but may be unlocked/relocked multiple
1687 * times by the routine.
1690 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
1692 struct namecache *nextkid;
1695 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
1697 _cache_ncp_gen_enter(ncp);
1698 _cache_setunresolved(ncp, 0);
1699 if (flags & CINV_DESTROY) {
1700 ncp->nc_flag |= NCF_DESTROYED;
1704 while ((flags & CINV_CHILDREN) &&
1705 (nextkid = TAILQ_FIRST(&ncp->nc_list)) != NULL
1707 struct namecache *kid;
1711 _cache_hold(nextkid);
1712 if (++track->depth > MAX_RECURSION_DEPTH) {
1713 track->resume_ncp = ncp;
1717 while ((kid = nextkid) != NULL) {
1719 * Parent (ncp) must be locked for the iteration.
1722 if (kid->nc_parent != ncp) {
1724 kprintf("cache_inval_internal restartA %s\n",
1729 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
1730 _cache_hold(nextkid);
1733 * Parent unlocked for this section to avoid
1734 * deadlocks. Then lock the kid and check for
1738 if (track->resume_ncp) {
1744 if (kid->nc_parent != ncp) {
1745 kprintf("cache_inval_internal "
1754 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
1755 TAILQ_FIRST(&kid->nc_list)
1758 rcnt += _cache_inval_internal(kid,
1759 flags & ~CINV_DESTROY, track);
1760 /*_cache_unlock(kid);*/
1761 /*_cache_drop(kid);*/
1768 * Relock parent to continue scan
1773 _cache_drop(nextkid);
1780 * Someone could have gotten in there while ncp was unlocked,
1783 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1785 _cache_ncp_gen_exit(ncp);
1791 * Invalidate a vnode's namecache associations. To avoid races against
1792 * the resolver we do not invalidate a node which we previously invalidated
1793 * but which was then re-resolved while we were in the invalidation loop.
1795 * Returns non-zero if any namecache entries remain after the invalidation
1798 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1799 * be ripped out of the topology while held, the vnode's v_namecache
1800 * list has no such restriction. NCP's can be ripped out of the list
1801 * at virtually any time if not locked, even if held.
1803 * In addition, the v_namecache list itself must be locked via
1804 * the vnode's spinlock.
1807 cache_inval_vp(struct vnode *vp, int flags)
1809 struct namecache *ncp;
1810 struct namecache *next;
1813 spin_lock(&vp->v_spin);
1814 ncp = TAILQ_FIRST(&vp->v_namecache);
1818 /* loop entered with ncp held and vp spin-locked */
1819 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1821 spin_unlock(&vp->v_spin);
1823 if (ncp->nc_vp != vp) {
1824 kprintf("Warning: cache_inval_vp: race-A detected on "
1825 "%s\n", ncp->nc_name);
1831 _cache_inval(ncp, flags);
1832 _cache_put(ncp); /* also releases reference */
1834 spin_lock(&vp->v_spin);
1835 if (ncp && ncp->nc_vp != vp) {
1836 spin_unlock(&vp->v_spin);
1837 kprintf("Warning: cache_inval_vp: race-B detected on "
1838 "%s\n", ncp->nc_name);
1843 spin_unlock(&vp->v_spin);
1844 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1848 * This routine is used instead of the normal cache_inval_vp() when we
1849 * are trying to recycle otherwise good vnodes.
1851 * Return 0 on success, non-zero if not all namecache records could be
1852 * disassociated from the vnode (for various reasons).
1855 cache_inval_vp_nonblock(struct vnode *vp)
1857 struct namecache *ncp;
1858 struct namecache *next;
1860 spin_lock(&vp->v_spin);
1862 ncp = TAILQ_FIRST(&vp->v_namecache);
1867 /* loop entered with ncp held */
1868 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1870 spin_unlock(&vp->v_spin);
1871 if (_cache_lock_nonblock(ncp)) {
1877 if (ncp->nc_vp != vp) {
1878 kprintf("Warning: cache_inval_vp: race-A detected on "
1879 "%s\n", ncp->nc_name);
1885 _cache_inval(ncp, 0);
1886 _cache_put(ncp); /* also releases reference */
1888 spin_lock(&vp->v_spin);
1889 if (ncp && ncp->nc_vp != vp) {
1890 spin_unlock(&vp->v_spin);
1891 kprintf("Warning: cache_inval_vp: race-B detected on "
1892 "%s\n", ncp->nc_name);
1897 spin_unlock(&vp->v_spin);
1899 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1903 * Attempt to quickly invalidate the vnode's namecache entry. This function
1904 * will also dive the ncp and free its children but only if they are trivial.
1905 * All locks are non-blocking and the function will fail if required locks
1906 * cannot be obtained.
1908 * We want this sort of function to be able to guarantee progress when vnlru
1909 * wants to recycle a vnode. Directories could otherwise get stuck and not
1910 * be able to recycle due to destroyed or unresolved children in the
1914 cache_inval_vp_quick(struct vnode *vp)
1916 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
1917 struct namecache *ncp;
1918 struct namecache *kid;
1920 spin_lock(&vp->v_spin);
1921 while ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
1923 spin_unlock(&vp->v_spin);
1924 if (_cache_lock_nonblock(ncp)) {
1930 * Try to trivially destroy any children.
1932 while ((kid = TAILQ_FIRST(&ncp->nc_list)) != NULL) {
1933 struct nchash_head *nchpp;
1936 * Early test without the lock. Give up if the
1937 * child has children of its own, the child is
1938 * positively-resolved, or the ref-count is
1941 if (TAILQ_FIRST(&kid->nc_list) ||
1943 kid->nc_refs != ncpbaserefs(kid))
1950 if (_cache_lock_nonblock(kid)) {
1957 * A destruction/free test requires the parent,
1958 * the kid, and the hash table to be locked. Note
1959 * that the kid may still be on the negative cache
1962 nchpp = kid->nc_head;
1963 spin_lock(&nchpp->spin);
1966 * Give up if the child isn't trivial. It can be
1967 * resolved or unresolved but must not have a vp.
1969 if (kid->nc_parent != ncp ||
1971 TAILQ_FIRST(&kid->nc_list) ||
1972 kid->nc_refs != 1 + ncpbaserefs(kid))
1974 spin_unlock(&nchpp->spin);
1980 ++pn->inv_kid_quick_count;
1983 * We can safely destroy the kid. It may still
1984 * have extra refs due to ncneglist races, but since
1985 * we checked above with the lock held those races
1986 * will self-resolve.
1988 * With these actions the kid should nominally
1989 * have just its natural ref plus our ref.
1991 * This is only safe because we hold locks on
1992 * the parent, the kid, and the nchpp. The only
1993 * lock we don't have is on the ncneglist and that
1994 * can race a ref, but as long as we unresolve the
1995 * kid before executing our final drop the ncneglist
1996 * code path(s) will just drop their own ref so all
1999 _cache_unlink_parent(ncp, kid, nchpp);
2000 _cache_setunresolved(kid, 1);
2001 if (kid->nc_refs != 2) {
2002 kprintf("Warning: kid %p unexpected refs=%d "
2005 kid->nc_flag, kid->nc_name);
2007 _cache_put(kid); /* drop our ref and lock */
2008 _cache_drop(kid); /* drop natural ref to destroy */
2012 * Now check ncp itself against our expectations. With
2013 * no children left we have our ref plus whether it is
2014 * resolved or not (which it has to be, actually, since it
2015 * is hanging off the vp->v_namecache).
2017 if (ncp->nc_refs != 1 + ncpbaserefs(ncp)) {
2019 spin_lock(&vp->v_spin);
2023 ++pn->inv_ncp_quick_count;
2026 * Success, disassociate and release the ncp. Do not
2027 * try to zap it here.
2029 * NOTE: Releasing the ncp here leaves it in the tree,
2030 * but since we have disassociated the vnode this
2031 * ncp entry becomes 'trivial' and successive calls
2032 * to cache_inval_vp_quick() will be able to continue
2035 _cache_setunresolved(ncp, 1);
2037 spin_lock(&vp->v_spin);
2039 spin_unlock(&vp->v_spin);
2043 * Clears the universal directory search 'ok' flag. This flag allows
2044 * nlookup() to bypass normal vnode checks. This flag is a cached flag
2045 * so clearing it simply forces revalidation.
2048 cache_inval_wxok(struct vnode *vp)
2050 struct namecache *ncp;
2052 spin_lock(&vp->v_spin);
2053 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
2054 if (ncp->nc_flag & (NCF_WXOK | NCF_NOTX))
2055 atomic_clear_short(&ncp->nc_flag, NCF_WXOK | NCF_NOTX);
2057 spin_unlock(&vp->v_spin);
2061 * The source ncp has been renamed to the target ncp. All elements have been
2062 * locked, including the parent ncp's.
2064 * The target ncp is destroyed (as a normal rename-over would destroy the
2065 * target file or directory).
2067 * Because there may be references to the source ncp we cannot copy its
2068 * contents to the target. Instead the source ncp is relinked as the target
2069 * and the target ncp is removed from the namecache topology.
2072 cache_rename(struct nchandle *fnch, struct nchandle *tnch)
2074 struct namecache *fncp = fnch->ncp;
2075 struct namecache *tncp = tnch->ncp;
2076 struct namecache *par;
2077 struct nchash_head *nchpp;
2082 if (tncp->nc_nlen) {
2083 nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHEAUX, M_WAITOK);
2084 bcopy(tncp->nc_name, nname, tncp->nc_nlen);
2085 nname[tncp->nc_nlen] = 0;
2091 * Rename fncp (unlink)
2093 if (fncp->nc_parent) {
2094 par = fncp->nc_parent;
2097 nchpp = fncp->nc_head;
2098 spin_lock(&nchpp->spin);
2099 _cache_unlink_parent(par, fncp, nchpp); /* eats nchpp */
2105 oname = fncp->nc_name;
2106 fncp->nc_name = nname;
2107 fncp->nc_nlen = tncp->nc_nlen;
2109 kfree(oname, M_VFSCACHEAUX);
2111 par = tncp->nc_parent;
2112 KKASSERT(par->nc_lock.lk_lockholder == curthread);
2115 * Rename fncp (relink)
2117 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
2118 hash = fnv_32_buf(&par, sizeof(par), hash);
2119 nchpp = NCHHASH(hash);
2121 spin_lock(&nchpp->spin);
2122 _cache_link_parent(fncp, par, nchpp);
2123 spin_unlock(&nchpp->spin);
2126 * Get rid of the overwritten tncp (unlink)
2128 _cache_unlink(tncp);
2132 * Perform actions consistent with unlinking a file. The passed-in ncp
2135 * The ncp is marked DESTROYED so it no longer shows up in searches,
2136 * and will be physically deleted when the vnode goes away.
2138 * If the related vnode has no refs then we cycle it through vget()/vput()
2139 * to (possibly if we don't have a ref race) trigger a deactivation,
2140 * allowing the VFS to trivially detect and recycle the deleted vnode
2141 * via VOP_INACTIVE().
2143 * NOTE: _cache_rename() will automatically call _cache_unlink() on the
2147 cache_unlink(struct nchandle *nch)
2149 _cache_unlink(nch->ncp);
2153 _cache_unlink(struct namecache *ncp)
2158 * Causes lookups to fail and allows another ncp with the same
2159 * name to be created under ncp->nc_parent.
2161 _cache_ncp_gen_enter(ncp);
2162 ncp->nc_flag |= NCF_DESTROYED;
2165 * Attempt to trigger a deactivation. Set VREF_FINALIZE to
2166 * force action on the 1->0 transition. Do not destroy the
2167 * vp association if a vp is present (leave the destroyed ncp
2168 * resolved through the vp finalization).
2170 * Cleanup the refs in the resolved-not-found case by setting
2171 * the ncp to an unresolved state. This improves our ability
2172 * to get rid of dead ncp elements in other cache_*() routines.
2174 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2177 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
2178 if (VREFCNT(vp) <= 0) {
2179 if (vget(vp, LK_SHARED) == 0)
2183 _cache_setunresolved(ncp, 0);
2186 _cache_ncp_gen_exit(ncp);
2190 * Return non-zero if the nch might be associated with an open and/or mmap()'d
2191 * file. The easy solution is to just return non-zero if the vnode has refs.
2192 * Used to interlock hammer2 reclaims (VREF_FINALIZE should already be set to
2193 * force the reclaim).
2196 cache_isopen(struct nchandle *nch)
2199 struct namecache *ncp = nch->ncp;
2201 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
2202 (vp = ncp->nc_vp) != NULL &&
2211 * vget the vnode associated with the namecache entry. Resolve the namecache
2212 * entry if necessary. The passed ncp must be referenced and locked. If
2213 * the ncp is resolved it might be locked shared.
2215 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
2216 * (depending on the passed lk_type) vnode will be returned in *vpp with an error
2217 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
2218 * most typical error is ENOENT, meaning that the ncp represents a negative
2219 * cache hit and there is no vnode to retrieve, but other errors can occur
2222 * The vget() can race a reclaim. If this occurs we re-resolve the
2225 * There are numerous places in the kernel where vget() is called on a
2226 * vnode while one or more of its namecache entries is locked. Releasing
2227 * a vnode never deadlocks against locked namecache entries (the vnode
2228 * will not get recycled while referenced ncp's exist). This means we
2229 * can safely acquire the vnode. In fact, we MUST NOT release the ncp
2230 * lock when acquiring the vp lock or we might cause a deadlock.
2232 * NOTE: The passed-in ncp must be locked exclusively if it is initially
2233 * unresolved. If a reclaim race occurs the passed-in ncp will be
2234 * relocked exclusively before being re-resolved.
2237 cache_vget(struct nchandle *nch, struct ucred *cred,
2238 int lk_type, struct vnode **vpp)
2240 struct namecache *ncp;
2247 if (ncp->nc_flag & NCF_UNRESOLVED)
2248 error = cache_resolve(nch, cred);
2252 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
2253 error = vget(vp, lk_type);
2258 * The ncp may have been locked shared, we must relock
2259 * it exclusively before we can set it to unresolved.
2261 if (error == ENOENT) {
2262 kprintf("Warning: vnode reclaim race detected "
2263 "in cache_vget on %p (%s)\n",
2267 _cache_setunresolved(ncp, 1);
2272 * Not a reclaim race, some other error.
2274 KKASSERT(ncp->nc_vp == vp);
2277 KKASSERT(ncp->nc_vp == vp);
2278 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
2281 if (error == 0 && vp == NULL)
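/*
 * Illustrative sketch only: typical consumption of cache_vget().  'nch'
 * and 'cred' are hypothetical; vput() releases both the lock and the
 * ref obtained here.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		... use the locked, referenced vnode ...
 *		vput(vp);
 *	}
 */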
2288 * Similar to cache_vget() but only acquires a ref on the vnode. The vnode
2289 * is already held by virtue of the ncp being locked, but it might not be
2290 * referenced and while it is not referenced it can transition into the
2293 * NOTE: The passed-in ncp must be locked exclusively if it is initially
2294 * unresolved. If a reclaim race occurs the passed-in ncp will be
2295 * relocked exclusively before being re-resolved.
2297 * NOTE: At the moment we have to issue a vget() on the vnode, even though
2298 * we are going to immediately release the lock, in order to resolve
2299 * potential reclamation races. Once we have a solid vnode ref that
2300 * was (at some point) interlocked via a vget(), the vnode will not
2303 * NOTE: vhold counts (v_auxrefs) do not prevent reclamation.
2306 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
2308 struct namecache *ncp;
2316 if (ncp->nc_flag & NCF_UNRESOLVED)
2317 error = cache_resolve(nch, cred);
2321 while (error == 0 && (vp = ncp->nc_vp) != NULL) {
2323 * Try a lockless ref of the vnode. VRECLAIMED transitions
2324 * use the vx_lock state and update-counter mechanism so we
2325 * can detect if one is in-progress or occurred.
2327 * If we can successfully ref the vnode and interlock against
2328 * the update-counter mechanism, and VRECLAIMED is found to
2329 * not be set after that, we should be good.
2331 v = spin_access_start_only(&vp->v_spin);
2332 if (__predict_true(spin_access_check_inprog(v) == 0)) {
2334 if (__predict_false(
2335 spin_access_end_only(&vp->v_spin, v))) {
2339 if (__predict_true((vp->v_flag & VRECLAIMED) == 0)) {
2343 kprintf("CACHE_VREF: IN-RECLAIM\n");
2347 * Do it the slow way
2349 error = vget(vp, LK_SHARED);
2354 if (error == ENOENT) {
2355 kprintf("Warning: vnode reclaim race detected "
2356 "in cache_vget on %p (%s)\n",
2360 _cache_setunresolved(ncp, 1);
2365 * Not a reclaim race, some other error.
2367 KKASSERT(ncp->nc_vp == vp);
2370 KKASSERT(ncp->nc_vp == vp);
2371 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
2372 /* caller does not want a lock */
2377 if (error == 0 && vp == NULL)
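/*
 * Illustrative sketch (not part of the original code): cache_vref() differs
 * from cache_vget() in that the returned vnode is referenced but NOT locked,
 * so it is released with vrele() rather than vput().
 *
 *	struct vnode *vp;
 *
 *	if (cache_vref(&nch, cred, &vp) == 0) {
 *		(use the referenced, unlocked vnode)
 *		vrele(vp);
 *	}
 */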
2385 * Return a referenced vnode representing the parent directory of
2388 * Because the caller has locked the ncp it should not be possible for
2389 * the parent ncp to go away. However, the parent can unresolve its
2390 * dvp at any time so we must be able to acquire a lock on the parent
2391 * to safely access nc_vp.
2393 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
2394 * so use vhold()/vdrop() while holding the lock to prevent dvp from
2395 * getting destroyed.
2397 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a
2398 * lock on the ncp in question..
2401 cache_dvpref(struct namecache *ncp)
2403 struct namecache *par;
2407 if ((par = ncp->nc_parent) != NULL) {
2410 if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
2411 if ((dvp = par->nc_vp) != NULL)
2416 if (vget(dvp, LK_SHARED) == 0) {
2419 /* return refd, unlocked dvp */
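/*
 * Illustrative sketch (not part of the original code): the dvp returned by
 * cache_dvpref() is referenced but unlocked (see the "return refd, unlocked
 * dvp" note above), so a caller resolving against it would presumably drop
 * the reference with vrele() when done.
 *
 *	struct vnode *dvp;
 *
 *	if ((dvp = cache_dvpref(ncp)) != NULL) {
 *		(issue the VOP against dvp, e.g. VOP_NRESOLVE())
 *		vrele(dvp);
 *	}
 */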
2431 * Convert a directory vnode to a namecache record without any other
2432 * knowledge of the topology. This ONLY works with directory vnodes and
2433 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
2434 * returned ncp (if not NULL) will be held and unlocked.
2436 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
2437 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
2438 * for dvp. This will fail only if the directory has been deleted out from
2441 * Callers must always check for a NULL return no matter the value of 'makeit'.
2443 * To avoid blowing out the kernel stack, each recursive call increments
2444 * the makeit variable.
2447 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2448 struct vnode *dvp, char *fakename);
2449 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
2450 struct vnode **saved_dvp);
2453 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
2454 struct nchandle *nch)
2456 struct vnode *saved_dvp;
2462 nch->mount = dvp->v_mount;
2467 * Handle the makeit == 0 degenerate case
2470 spin_lock_shared(&dvp->v_spin);
2471 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2474 spin_unlock_shared(&dvp->v_spin);
2478 * Loop until resolution, inside code will break out on error.
2482 * Break out if we successfully acquire a working ncp.
2484 spin_lock_shared(&dvp->v_spin);
2485 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
2488 spin_unlock_shared(&dvp->v_spin);
2491 spin_unlock_shared(&dvp->v_spin);
2494 * If dvp is the root of its filesystem it should already
2495 * have a namecache pointer associated with it as a side
2496 * effect of the mount, but it may have been disassociated.
2498 if (dvp->v_flag & VROOT) {
2499 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
2500 error = cache_resolve_mp(nch->mount, 1);
2501 _cache_put(nch->ncp);
2502 if (ncvp_debug & 1) {
2503 kprintf("cache_fromdvp: resolve root of "
2504 "mount %p error %d",
2505 dvp->v_mount, error);
2509 kprintf(" failed\n");
2514 kprintf(" succeeded\n");
2519 * If we are recursed too deeply resort to an O(n^2)
2520 * algorithm to resolve the namecache topology. The
2521 * resolved pvp is left referenced in saved_dvp to
2522 * prevent the tree from being destroyed while we loop.
2525 error = cache_fromdvp_try(dvp, cred, &saved_dvp);
2527 kprintf("lookupdotdot(longpath) failed %d "
2528 "dvp %p\n", error, dvp);
2536 * Get the parent directory and resolve its ncp.
2539 kfree(fakename, M_TEMP);
2542 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2545 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
2551 * Reuse makeit as a recursion depth counter. On success
2552 * nch will be fully referenced.
2554 cache_fromdvp(pvp, cred, makeit + 1, nch);
2556 if (nch->ncp == NULL)
2560 * Do an inefficient scan of pvp (embodied by ncp) to look
2561 * for dvp. This will create a namecache record for dvp on
2562 * success. We loop up to recheck on success.
2564 * ncp and dvp are both held but not locked.
2566 error = cache_inefficient_scan(nch, cred, dvp, fakename);
2568 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
2569 pvp, nch->ncp->nc_name, dvp);
2571 /* nch was NULLed out, reload mount */
2572 nch->mount = dvp->v_mount;
2575 if (ncvp_debug & 1) {
2576 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
2577 pvp, nch->ncp->nc_name);
2580 /* nch was NULLed out, reload mount */
2581 nch->mount = dvp->v_mount;
2585 * If nch->ncp is non-NULL it will have been held already.
2588 kfree(fakename, M_TEMP);
2597 * Go up the chain of parent directories until we find something
2598 * we can resolve into the namecache. This is very inefficient.
2602 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
2603 struct vnode **saved_dvp)
2605 struct nchandle nch;
2608 static time_t last_fromdvp_report;
2612 * Loop getting the parent directory vnode until we get something we
2613 * can resolve in the namecache.
2616 nch.mount = dvp->v_mount;
2622 kfree(fakename, M_TEMP);
2625 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
2632 spin_lock_shared(&pvp->v_spin);
2633 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
2634 _cache_hold(nch.ncp);
2635 spin_unlock_shared(&pvp->v_spin);
2639 spin_unlock_shared(&pvp->v_spin);
2640 if (pvp->v_flag & VROOT) {
2641 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
2642 error = cache_resolve_mp(nch.mount, 1);
2643 _cache_unlock(nch.ncp);
2646 _cache_drop(nch.ncp);
2656 if (last_fromdvp_report != time_uptime) {
2657 last_fromdvp_report = time_uptime;
2658 kprintf("Warning: extremely inefficient path "
2659 "resolution on %s\n",
2662 error = cache_inefficient_scan(&nch, cred, dvp, fakename);
2665 * Hopefully dvp now has a namecache record associated with
2666 * it. Leave it referenced to prevent the kernel from
2667 * recycling the vnode. Otherwise extremely long directory
2668 * paths could result in endless recycling.
2673 _cache_drop(nch.ncp);
2676 kfree(fakename, M_TEMP);
2681 * Do an inefficient scan of the directory represented by ncp looking for
2682 * the directory vnode dvp. ncp must be held but not locked on entry and
2683 * will be held on return. dvp must be refd but not locked on entry and
2684 * will remain refd on return.
2686 * Why do this at all? Well, due to its stateless nature the NFS server
2687 * converts file handles directly to vnodes without necessarily going through
2688 * the namecache ops that would otherwise create the namecache topology
2689 * leading to the vnode. We could either (1) Change the namecache algorithms
2690 * to allow disconnected namecache records that are re-merged opportunistically,
2691 * or (2) Make the NFS server backtrack and scan to recover a connected
2692 * namecache topology in order to then be able to issue new API lookups.
2694 * It turns out that (1) is a huge mess. It takes a nice clean set of
2695 * namecache algorithms and introduces a lot of complication in every subsystem
2696 * that calls into the namecache to deal with the re-merge case, especially
2697 * since we are using the namecache to placehold negative lookups and the
2698 * vnode might not be immediately assigned. (2) is certainly far less
2699 * efficient than (1), but since we are only talking about directories here
2700 * (which are likely to remain cached), the case does not actually run all
2701 * that often and has the supreme advantage of not polluting the namecache
2704 * If a fakename is supplied just construct a namecache entry using the
2708 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
2709 struct vnode *dvp, char *fakename)
2711 struct nlcomponent nlc;
2712 struct nchandle rncp;
2724 vat.va_blocksize = 0;
2725 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
2728 error = cache_vref(nch, cred, &pvp);
2732 if (ncvp_debug & 1) {
2733 kprintf("inefficient_scan of (%p,%s): directory iosize %ld "
2734 "vattr fileid = %lld\n",
2735 nch->ncp, nch->ncp->nc_name,
2737 (long long)vat.va_fileid);
2741 * Use the supplied fakename if not NULL. Fake names are typically
2742 * not in the actual filesystem hierarchy. This is used by HAMMER
2743 * to glue @@timestamp recursions together.
2746 nlc.nlc_nameptr = fakename;
2747 nlc.nlc_namelen = strlen(fakename);
2748 rncp = cache_nlookup(nch, &nlc);
2752 if ((blksize = vat.va_blocksize) == 0)
2753 blksize = DEV_BSIZE;
2754 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
2760 iov.iov_base = rbuf;
2761 iov.iov_len = blksize;
2764 uio.uio_resid = blksize;
2765 uio.uio_segflg = UIO_SYSSPACE;
2766 uio.uio_rw = UIO_READ;
2767 uio.uio_td = curthread;
2770 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
2771 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
2773 den = (struct dirent *)rbuf;
2774 bytes = blksize - uio.uio_resid;
2777 if (ncvp_debug & 2) {
2778 kprintf("cache_inefficient_scan: %*.*s\n",
2779 den->d_namlen, den->d_namlen,
2782 if (den->d_type != DT_WHT &&
2783 den->d_ino == vat.va_fileid) {
2784 if (ncvp_debug & 1) {
2785 kprintf("cache_inefficient_scan: "
2786 "MATCHED inode %lld path %s/%*.*s\n",
2787 (long long)vat.va_fileid,
2789 den->d_namlen, den->d_namlen,
2792 nlc.nlc_nameptr = den->d_name;
2793 nlc.nlc_namelen = den->d_namlen;
2794 rncp = cache_nlookup(nch, &nlc);
2795 KKASSERT(rncp.ncp != NULL);
2798 bytes -= _DIRENT_DIRSIZ(den);
2799 den = _DIRENT_NEXT(den);
2801 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
2804 kfree(rbuf, M_TEMP);
2808 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
2809 _cache_setvp(rncp.mount, rncp.ncp, dvp, 1);
2810 if (ncvp_debug & 2) {
2811 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
2812 nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
2815 if (ncvp_debug & 2) {
2816 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
2817 nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
2821 if (rncp.ncp->nc_vp == NULL)
2822 error = rncp.ncp->nc_error;
2824 * Release rncp after a successful nlookup. rncp was fully
2829 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
2830 dvp, nch->ncp->nc_name);
2837 * This function must be called with the ncp held and locked and will unlock
2838 * and drop it during zapping.
2840 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
2841 * state, which disassociates it from its vnode or pcpu_ncache[n].neg_list
2842 * and removes the related reference. If the ncp can be removed, and the
2843 * parent can be zapped non-blocking, this function loops up.
2845 * There will be one ref from the caller (which we now own). The only
2846 * remaining autonomous refs to the ncp will then be due to nc_parent->nc_list,
2847 * so possibly 2 refs left. Taking this into account, if there are no
2848 * additional refs and no children, the ncp will be removed from the topology
2851 * References and/or children may exist if the ncp is in the middle of the
2852 * topology, preventing the ncp from being destroyed.
2854 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
2856 * This function may return a held (but NOT locked) parent node which the
2857 * caller must drop in a loop. Looping is one way to avoid unbounded recursion
2858 * due to deep namecache trees.
2860 * WARNING! For MPSAFE operation this routine must acquire up to three
2861 * spin locks to be able to safely test nc_refs. Lock order is
2864 * hash spinlock if on hash list
2865 * parent spinlock if child of parent
2866 * (the ncp is unresolved so there is no vnode association)
2869 cache_zap(struct namecache *ncp)
2871 struct namecache *par;
2872 struct nchash_head *nchpp;
2874 int nonblock = 1; /* XXX cleanup */
2879 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2880 * This gets rid of any vp->v_namecache list or negative list and
2883 _cache_setunresolved(ncp, 1);
2886 * Try to scrap the entry and possibly tail-recurse on its parent.
2887 * We only scrap unref'd (other than our ref) unresolved entries,
2888 * we do not scrap 'live' entries.
2890 * If nc_parent is non NULL we expect 2 references, else just 1.
2891 * If there are more, someone else also holds the ncp and we cannot
2894 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2895 KKASSERT(ncp->nc_refs > 0);
2898 * If the ncp is linked to its parent it will also be in the hash
2899 * table. We have to be able to lock the parent and the hash table.
2901 * Acquire locks. Note that the parent can't go away while we hold
2902 * a child locked. If nc_parent is present, expect 2 refs instead
2906 if ((par = ncp->nc_parent) != NULL) {
2908 if (_cache_lock_nonblock(par)) {
2910 ncp->nc_flag |= NCF_DEFEREDZAP;
2912 &pcpu_ncache[mycpu->gd_cpuid].numdefered,
2915 _cache_drop(ncp); /* caller's ref */
2923 nchpp = ncp->nc_head;
2924 spin_lock(&nchpp->spin);
2928 * With the parent and nchpp locked, and the vnode removed
2929 * (no vp->v_namecache), we expect 1 or 2 refs. If there are
2930 * more, someone else has a ref and we cannot zap the entry.
2933 * one for our parent link (parent also has one from the linkage)
2941 * On failure undo the work we've done so far and drop the
2942 * caller's ref and ncp.
2944 if (ncp->nc_refs != refcmp || TAILQ_FIRST(&ncp->nc_list)) {
2946 spin_unlock(&nchpp->spin);
2955 * We own all the refs and with the spinlocks held no further
2956 * refs can be acquired by others.
2958 * Remove us from the hash list and parent list. We have to
2959 * drop a ref on the parent's vp if the parent's list becomes
2963 KKASSERT(nchpp == ncp->nc_head);
2964 _cache_unlink_parent(par, ncp, nchpp); /* eats nchpp */
2965 /*_cache_unlock(par);*/
2966 /* &nchpp->spin is unlocked by call */
2968 KKASSERT(ncp->nc_head == NULL);
2972 * ncp should not have picked up any refs. Physically
2975 if (ncp->nc_refs != refcmp) {
2976 panic("cache_zap: %p bad refs %d (expected %d)\n",
2977 ncp, ncp->nc_refs, refcmp);
2979 /* _cache_unlock(ncp) not required */
2980 ncp->nc_refs = -1; /* safety */
2982 kfree(ncp->nc_name, M_VFSCACHEAUX);
2983 kfree_obj(ncp, M_VFSCACHE);
2987 * Loop up if we can recursively clean out the parent.
2990 refcmp = 1; /* ref on parent */
2991 if (par->nc_parent) /* par->par */
2993 par->nc_flag &= ~NCF_DEFEREDZAP;
2994 if ((par->nc_flag & NCF_UNRESOLVED) &&
2995 par->nc_refs == refcmp &&
2996 TAILQ_EMPTY(&par->nc_list))
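/*
 * Worked example (not part of the original code) of the reference accounting
 * used above: with a parent present the ncp is expected to hold exactly 2
 * refs (the caller's ref plus the ref held by the parent's nc_list linkage);
 * with no parent only the caller's ref remains, so 1.  When looping up, the
 * parent is checked the same way: refcmp starts at 1 for the ref we now hold
 * on it, plus 1 more if it in turn has a parent (the linkage ref).  Any
 * additional ref means another thread holds the ncp and it cannot be zapped.
 */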
3008 * Clean up dangling negative cache and deferred-drop entries in the
3011 * This routine is called in the critical path and also called from
3012 * vnlru(). When called from vnlru we use a lower limit to try to
3013 * deal with the negative cache before the critical path has to start
3016 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;
3018 static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
3019 static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
3020 static cache_hs_t exc_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
3023 cache_hysteresis(int critpath)
3035 * Calculate negative ncp limit
3037 neglimit = maxvnodes / ncnegfactor;
3039 neglimit = neglimit * 8 / 10;
3042 * Don't cache too many negative hits. We use hysteresis to reduce
3043 * the impact on the critical path.
3047 switch(neg_cache_hysteresis_state[critpath]) {
3049 if (vfscache_negs > MINNEG && vfscache_negs > neglimit) {
3051 clean_neg = ncnegflush;
3053 clean_neg = ncnegflush +
3054 vfscache_negs - neglimit;
3055 neg_cache_hysteresis_state[critpath] = CHI_HIGH;
3059 if (vfscache_negs > MINNEG * 9 / 10 &&
3060 vfscache_negs * 9 / 10 > neglimit
3063 clean_neg = ncnegflush;
3065 clean_neg = ncnegflush +
3066 vfscache_negs * 9 / 10 -
3069 neg_cache_hysteresis_state[critpath] = CHI_LOW;
3074 _cache_cleanneg(clean_neg);
3077 * Don't cache too many unresolved elements. We use hysteresis to
3078 * reduce the impact on the critical path.
3080 if ((poslimit = ncposlimit) == 0)
3081 poslimit = maxvnodes / ncposfactor;
3083 poslimit = poslimit * 8 / 10;
3086 * Number of unresolved leaf elements in the namecache. These
3087 * can build-up for various reasons and may have to be disposed
3088 * of to allow the inactive list to be cleaned out by vnlru_proc()
3092 xnumunres = vfscache_unres;
3095 switch(pos_cache_hysteresis_state[critpath]) {
3097 if (xnumunres > poslimit && xnumunres > MINPOS) {
3099 clean_unres = ncposflush;
3101 clean_unres = ncposflush + xnumunres -
3103 pos_cache_hysteresis_state[critpath] = CHI_HIGH;
3107 if (xnumunres > poslimit * 5 / 6 && xnumunres > MINPOS) {
3109 clean_unres = ncposflush;
3111 clean_unres = ncposflush + xnumunres -
3114 pos_cache_hysteresis_state[critpath] = CHI_LOW;
3120 * Excessive positive hits can accumulate due to large numbers of
3121 * hardlinks (the vnode cache will not prevent ncps representing
3122 * hardlinks from growing into infinity).
3124 exclimit = maxvnodes * 2;
3126 exclimit = exclimit * 8 / 10;
3127 xnumleafs = vfscache_leafs;
3130 switch(exc_cache_hysteresis_state[critpath]) {
3132 if (xnumleafs > exclimit && xnumleafs > MINPOS) {
3134 clean_excess = ncposflush;
3136 clean_excess = ncposflush + xnumleafs -
3138 exc_cache_hysteresis_state[critpath] = CHI_HIGH;
3142 if (xnumleafs > exclimit * 5 / 6 && xnumleafs > MINPOS) {
3144 clean_excess = ncposflush;
3146 clean_excess = ncposflush + xnumleafs -
3149 exc_cache_hysteresis_state[critpath] = CHI_LOW;
3154 if (clean_unres || clean_excess)
3155 _cache_cleanpos(clean_unres, clean_excess);
3158 * Clean out dangling deferred-zap ncps which could not be cleanly
3159 * dropped if too many build up. Note that numdefered is
3160 * heuristical. Make sure we are real-time for the current cpu,
3161 * plus the global rollup.
3163 if (pcpu_ncache[mycpu->gd_cpuid].numdefered + numdefered > neglimit) {
3164 _cache_cleandefered();
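/*
 * Worked example (not part of the original code), assuming the 8/10 scaling
 * above is what implements the lower vnlru limit described in the function
 * comment: with maxvnodes = 100000 and ncnegfactor = 16 (values purely for
 * illustration), neglimit is 6250 on the critical path and 5000 for vnlru,
 * so vnlru starts trimming the negative cache before the critical path has
 * to.  The unresolved and excessive-leaf limits are derived the same way
 * from maxvnodes / ncposfactor and maxvnodes * 2 respectively.
 */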
3169 * NEW NAMECACHE LOOKUP API
3171 * Lookup an entry in the namecache. The passed par_nch must be referenced
3172 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
3173 * is ALWAYS returned, even if the supplied component is illegal.
3175 * The resulting namecache entry should be returned to the system with
3176 * cache_put() or cache_unlock() + cache_drop().
3178 * namecache locks are recursive but care must be taken to avoid lock order
3179 * reversals (hence why the passed par_nch must be unlocked). Lock
3180 * ordering is defined for parent traversals, not for child traversals.
3182 * Nobody else will be able to manipulate the associated namespace (e.g.
3183 * create, delete, rename, rename-target) until the caller unlocks the
3186 * The returned entry will be in one of three states: positive hit (non-null
3187 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
3188 * Unresolved entries must be resolved through the filesystem to associate the
3189 * vnode and/or determine whether a positive or negative hit has occurred.
3191 * It is not necessary to lock a directory in order to lock namespace under
3192 * that directory. In fact, it is explicitly not allowed to do that. A
3193 * directory is typically only locked when being created, renamed, or
3196 * The directory (par) may be unresolved, in which case any returned child
3197 * will likely also be marked unresolved. Likely but not guaranteed. Since
3198 * the filesystem lookup requires a resolved directory vnode the caller is
3199 * responsible for resolving the namecache chain top-down. This API
3200 * specifically allows whole chains to be created in an unresolved state.
3203 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
3205 struct nchandle nch;
3206 struct namecache *ncp;
3207 struct namecache *new_ncp;
3208 struct namecache *rep_ncp; /* reuse a destroyed ncp */
3209 struct nchash_head *nchpp;
3217 mp = par_nch->mount;
3221 * This is a good time to call it, no ncp's are locked by
3224 cache_hysteresis(1);
3227 * Try to locate an existing entry
3229 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3230 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3233 nchpp = NCHHASH(hash);
3237 spin_lock(&nchpp->spin);
3239 spin_lock_shared(&nchpp->spin);
3242 * Do a reverse scan to collect any DESTROYED ncps prior to matching
3243 * an existing entry.
3245 TAILQ_FOREACH_REVERSE(ncp, &nchpp->list, nchash_list, nc_hash) {
3247 * Break out if we find a matching entry. Note that
3248 * UNRESOLVED entries may match, but DESTROYED entries
3251 * We may be able to reuse DESTROYED entries that we come
3252 * across, even if the name does not match, as long as
3253 * nc_nlen is correct and the only hold ref is from the nchpp
3256 if (ncp->nc_parent == par_nch->ncp &&
3257 ncp->nc_nlen == nlc->nlc_namelen) {
3258 if (ncp->nc_flag & NCF_DESTROYED) {
3259 if (ncp->nc_refs == 1 && rep_ncp == NULL)
3263 if (bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen))
3271 _cache_hold(rep_ncp);
3274 spin_unlock(&nchpp->spin);
3276 spin_unlock_shared(&nchpp->spin);
3279 _cache_unlock(par_nch->ncp);
3284 * Really try to destroy rep_ncp if encountered.
3285 * Various edge cases can build up more than one,
3286 * so loop if we succeed. This isn't perfect, but
3287 * we can't afford to have tons of entries build
3288 * up on a single nchpp list due to rename-over
3289 * operations. If that were to happen, the system
3290 * would bog down quickly.
3293 if (_cache_lock_nonblock(rep_ncp) == 0) {
3294 if (rep_ncp->nc_flag & NCF_DESTROYED) {
3295 if (cache_zap(rep_ncp)) {
3300 _cache_unlock(rep_ncp);
3301 _cache_drop(rep_ncp);
3304 _cache_drop(rep_ncp);
3309 * Continue processing the matched entry
3311 if (_cache_lock_special(ncp) == 0) {
3313 * Successfully locked but we must re-test
3314 * conditions that might have changed since
3315 * we did not have the lock before.
3317 if (ncp->nc_parent != par_nch->ncp ||
3318 ncp->nc_nlen != nlc->nlc_namelen ||
3319 bcmp(ncp->nc_name, nlc->nlc_nameptr,
3321 (ncp->nc_flag & NCF_DESTROYED)) {
3325 _cache_auto_unresolve(mp, ncp);
3327 _cache_free(new_ncp);
3328 new_ncp = NULL; /* safety */
3332 _cache_get(ncp); /* cycle the lock to block */
3340 * We failed to locate the entry, try to resurrect a destroyed
3341 * entry that we did find that is already correctly linked into
3342 * nchpp and the parent. We must re-test conditions after
3343 * successfully locking rep_ncp.
3345 * This case can occur under heavy loads due to not being able
3346 * to safely lock the parent in cache_zap(). Nominally a repeated
3347 * create/unlink load, but only the namelen needs to match.
3349 * An exclusive lock on the nchpp is required to process this case,
3350 * otherwise a race can cause duplicate entries to be created with
3351 * one cpu reusing a DESTROYED ncp while another creates a new_ncp.
3353 if (rep_ncp && use_excl) {
3354 if (_cache_lock_nonblock(rep_ncp) == 0) {
3355 _cache_hold(rep_ncp);
3356 if (rep_ncp->nc_parent == par_nch->ncp &&
3357 rep_ncp->nc_nlen == nlc->nlc_namelen &&
3358 (rep_ncp->nc_flag & NCF_DESTROYED) &&
3359 rep_ncp->nc_refs == 2)
3366 _cache_ncp_gen_enter(ncp);
3368 bcopy(nlc->nlc_nameptr, ncp->nc_name,
3372 * This takes some care. We must clear the
3373 * NCF_DESTROYED flag before unlocking the
3374 * hash chain so other concurrent searches
3375 * do not skip this element.
3377 * We must also unlock the hash chain before
3378 * unresolving the ncp to avoid deadlocks.
3379 * We hold the lock on the ncp so we can safely
3380 * reinitialize nc_flag after that.
3382 ncp->nc_flag &= ~NCF_DESTROYED;
3383 spin_unlock(&nchpp->spin); /* use_excl */
3385 _cache_setunresolved(ncp, 0);
3386 ncp->nc_flag = NCF_UNRESOLVED;
3387 ncp->nc_error = ENOTCONN;
3389 _cache_ncp_gen_exit(ncp);
3392 _cache_unlock(par_nch->ncp);
3396 _cache_free(new_ncp);
3397 new_ncp = NULL; /* safety */
3401 _cache_put(rep_ncp);
3406 * Otherwise create a new entry and add it to the cache. The parent
3407 * ncp must also be locked so we can link into it.
3409 * We have to relookup after possibly blocking in kmalloc or
3410 * when locking par_nch.
3412 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
3413 * mount case, in which case nc_name will be NULL.
3415 * NOTE: In the rep_ncp != NULL case we are trying to reuse
3416 * a DESTROYED entry, but didn't have an exclusive lock.
3417 * In this situation we do not create a new_ncp.
3419 if (new_ncp == NULL) {
3421 spin_unlock(&nchpp->spin);
3423 spin_unlock_shared(&nchpp->spin);
3424 if (rep_ncp == NULL) {
3425 new_ncp = cache_alloc(nlc->nlc_namelen);
3426 if (nlc->nlc_namelen) {
3427 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
3429 new_ncp->nc_name[nlc->nlc_namelen] = 0;
3437 * NOTE! The spinlock is held exclusively here because new_ncp
3440 if (par_locked == 0) {
3441 spin_unlock(&nchpp->spin);
3442 _cache_lock(par_nch->ncp);
3448 * Link to parent (requires another ref, the one already in new_ncp
3449 * is what we will return).
3451 * WARNING! We still hold the spinlock. We have to set the hash
3452 * table entry atomically.
3456 _cache_link_parent(ncp, par_nch->ncp, nchpp);
3457 spin_unlock(&nchpp->spin);
3458 _cache_unlock(par_nch->ncp);
3459 /* par_locked = 0 - not used */
3462 * stats and namecache size management
3464 if (ncp->nc_flag & NCF_UNRESOLVED)
3465 ++gd->gd_nchstats->ncs_miss;
3466 else if (ncp->nc_vp)
3467 ++gd->gd_nchstats->ncs_goodhits;
3469 ++gd->gd_nchstats->ncs_neghits;
3472 _cache_mntref(nch.mount);
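/*
 * Illustrative sketch (not part of the original code): a minimal
 * cache_nlookup() call per the API comment above.  The parent nchandle must
 * be referenced and unlocked; the returned nchandle is referenced and locked
 * and is returned to the system with cache_put() (or cache_unlock() plus
 * cache_drop()).
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *
 *	nlc.nlc_nameptr = name;
 *	nlc.nlc_namelen = namelen;
 *	nch = cache_nlookup(&par_nch, &nlc);
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		(resolve via cache_resolve(&nch, cred) before use)
 *	cache_put(&nch);
 */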
3478 * Attempt to lookup a namecache entry and return with a shared namecache
3479 * lock. This operates non-blocking. EWOULDBLOCK is returned if excl is
3480 * set or we are unable to lock.
3483 cache_nlookup_maybe_shared(struct nchandle *par_nch,
3484 struct nlcomponent *nlc,
3485 int excl, struct nchandle *res_nch)
3487 struct namecache *ncp;
3488 struct nchash_head *nchpp;
3494 * If exclusive requested or shared namecache locks are disabled,
3497 if (ncp_shared_lock_disable || excl)
3498 return(EWOULDBLOCK);
3501 mp = par_nch->mount;
3504 * This is a good time to call it, no ncp's are locked by
3507 cache_hysteresis(1);
3510 * Try to locate an existing entry
3512 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3513 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3514 nchpp = NCHHASH(hash);
3516 spin_lock_shared(&nchpp->spin);
3518 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3520 * Break out if we find a matching entry. Note that
3521 * UNRESOLVED entries may match, but DESTROYED entries
3524 if (ncp->nc_parent == par_nch->ncp &&
3525 ncp->nc_nlen == nlc->nlc_namelen &&
3526 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3527 (ncp->nc_flag & NCF_DESTROYED) == 0
3530 spin_unlock_shared(&nchpp->spin);
3532 if (_cache_lock_shared_special(ncp) == 0) {
3533 if (ncp->nc_parent == par_nch->ncp &&
3534 ncp->nc_nlen == nlc->nlc_namelen &&
3535 bcmp(ncp->nc_name, nlc->nlc_nameptr,
3536 ncp->nc_nlen) == 0 &&
3537 (ncp->nc_flag & NCF_DESTROYED) == 0 &&
3538 (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
3539 _cache_auto_unresolve_test(mp, ncp) == 0)
3546 return(EWOULDBLOCK);
3553 spin_unlock_shared(&nchpp->spin);
3554 return(EWOULDBLOCK);
3559 * Note that nc_error might be non-zero (e.g. ENOENT).
3562 res_nch->mount = mp;
3564 ++gd->gd_nchstats->ncs_goodhits;
3565 _cache_mntref(res_nch->mount);
3567 KKASSERT(ncp->nc_error != EWOULDBLOCK);
3568 return(ncp->nc_error);
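/*
 * Illustrative sketch (not part of the original code): a caller of
 * cache_nlookup_maybe_shared() falls back to the normal blocking lookup
 * when EWOULDBLOCK is returned.
 *
 *	struct nchandle nch;
 *	int error;
 *
 *	error = cache_nlookup_maybe_shared(&par_nch, &nlc, 0, &nch);
 *	if (error == EWOULDBLOCK) {
 *		nch = cache_nlookup(&par_nch, &nlc);
 *		(resolve via cache_resolve() if NCF_UNRESOLVED, as usual)
 *	}
 */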
3572 * This is a non-blocking version of cache_nlookup() used by
3573 * nfs_readdirplusrpc_uio(). It can fail for any reason and
3574 * will return nch.ncp == NULL in that case.
3577 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
3579 struct nchandle nch;
3580 struct namecache *ncp;
3581 struct namecache *new_ncp;
3582 struct nchash_head *nchpp;
3589 mp = par_nch->mount;
3593 * Try to locate an existing entry
3595 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3596 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3598 nchpp = NCHHASH(hash);
3600 spin_lock(&nchpp->spin);
3601 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3603 * Break out if we find a matching entry. Note that
3604 * UNRESOLVED entries may match, but DESTROYED entries
3607 if (ncp->nc_parent == par_nch->ncp &&
3608 ncp->nc_nlen == nlc->nlc_namelen &&
3609 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3610 (ncp->nc_flag & NCF_DESTROYED) == 0
3613 spin_unlock(&nchpp->spin);
3615 _cache_unlock(par_nch->ncp);
3618 if (_cache_lock_special(ncp) == 0) {
3619 if (ncp->nc_parent != par_nch->ncp ||
3620 ncp->nc_nlen != nlc->nlc_namelen ||
3621 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) ||
3622 (ncp->nc_flag & NCF_DESTROYED)) {
3623 kprintf("cache_lookup_nonblock: "
3624 "ncp-race %p %*.*s\n",
3633 _cache_auto_unresolve(mp, ncp);
3635 _cache_free(new_ncp);
3646 * We failed to locate an entry, create a new entry and add it to
3647 * the cache. The parent ncp must also be locked so we
3650 * We have to relookup after possibly blocking in kmalloc or
3651 * when locking par_nch.
3653 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
3654 * mount case, in which case nc_name will be NULL.
3656 if (new_ncp == NULL) {
3657 spin_unlock(&nchpp->spin);
3658 new_ncp = cache_alloc(nlc->nlc_namelen);
3659 if (nlc->nlc_namelen) {
3660 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
3662 new_ncp->nc_name[nlc->nlc_namelen] = 0;
3666 if (par_locked == 0) {
3667 spin_unlock(&nchpp->spin);
3668 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
3676 * Link to parent (requires another ref, the one already in new_ncp
3677 * is what we will return).
3679 * WARNING! We still hold the spinlock. We have to set the hash
3680 * table entry atomically.
3684 _cache_link_parent(ncp, par_nch->ncp, nchpp);
3685 spin_unlock(&nchpp->spin);
3686 _cache_unlock(par_nch->ncp);
3687 /* par_locked = 0 - not used */
3690 * stats and namecache size management
3692 if (ncp->nc_flag & NCF_UNRESOLVED)
3693 ++gd->gd_nchstats->ncs_miss;
3694 else if (ncp->nc_vp)
3695 ++gd->gd_nchstats->ncs_goodhits;
3697 ++gd->gd_nchstats->ncs_neghits;
3700 _cache_mntref(nch.mount);
3705 _cache_free(new_ncp);
3714 * This is a non-locking optimized lookup that depends on adding a ref
3715 * to prevent normal eviction. nch.ncp can be returned as NULL for any
3716 * reason and the caller will retry with normal locking in that case.
3718 * This function only returns resolved entries so callers do not accidentally
3719 * race doing out of order / unfenced field checks.
3721 * The caller must validate the result for parent-to-child continuity.
3724 cache_nlookup_nonlocked(struct nchandle *par_nch, struct nlcomponent *nlc)
3726 struct nchandle nch;
3727 struct namecache *ncp;
3728 struct nchash_head *nchpp;
3734 mp = par_nch->mount;
3737 * Try to locate an existing entry
3739 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
3740 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
3741 nchpp = NCHHASH(hash);
3743 spin_lock_shared(&nchpp->spin);
3744 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
3746 * Break out if we find a matching entry. Note that
3747 * UNRESOLVED entries may match, but DESTROYED entries
3748 * do not. However, UNRESOLVED entries still return failure.
3750 if (ncp->nc_parent == par_nch->ncp &&
3751 ncp->nc_nlen == nlc->nlc_namelen &&
3752 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
3753 (ncp->nc_flag & NCF_DESTROYED) == 0
3756 * Test NFS timeout for auto-unresolve. Give up if
3757 * the entry is not resolved.
3759 * Getting the ref with the nchpp locked prevents
3760 * any transition to NCF_DESTROYED.
3762 if (_cache_auto_unresolve_test(par_nch->mount, ncp))
3764 if (ncp->nc_flag & NCF_UNRESOLVED)
3767 spin_unlock_shared(&nchpp->spin);
3770 * We need an additional test to ensure that the ref
3771 * we got above prevents transitions to NCF_UNRESOLVED.
3772 * This can occur if another thread is currently
3773 * holding the ncp exclusively locked or (if we raced
3774 * that and it unlocked before our test) the flag
3777 * XXX check if superseded by nc_generation XXX
3779 if (_cache_lockstatus(ncp) < 0 ||
3780 (ncp->nc_flag & (NCF_DESTROYED | NCF_UNRESOLVED)))
3782 if ((ncvp_debug & 4) &&
3784 (NCF_DESTROYED | NCF_UNRESOLVED)))
3786 kprintf("ncp state change: %p %08x %d %s\n",
3787 ncp, ncp->nc_flag, ncp->nc_error,
3791 spin_lock_shared(&nchpp->spin);
3796 * Return the ncp bundled into a nch on success.
3797 * The ref should passively prevent the ncp from
3798 * becoming unresolved without having to hold a lock.
3799 * (XXX this may not be entirely true)
3804 spin_unlock_shared(&nchpp->spin);
3811 * stats and namecache size management
3813 if (ncp->nc_flag & NCF_UNRESOLVED)
3814 ++gd->gd_nchstats->ncs_miss;
3815 else if (ncp->nc_vp)
3816 ++gd->gd_nchstats->ncs_goodhits;
3818 ++gd->gd_nchstats->ncs_neghits;
3821 _cache_mntref(nch.mount);
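/*
 * Illustrative sketch (not part of the original code): since this lockless
 * path can fail for any reason, callers treat a NULL ncp as "retry with the
 * locked lookup" rather than as an error.  The nonlocked result is only
 * referenced, not locked, so it is presumably released with cache_drop()
 * rather than cache_put().
 *
 *	nch = cache_nlookup_nonlocked(&par_nch, &nlc);
 *	if (nch.ncp == NULL)
 *		nch = cache_nlookup(&par_nch, &nlc);
 */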
3827 * The namecache entry is marked as being used as a mount point.
3828 * Locate the mount if it is visible to the caller. The DragonFly
3829 * mount system allows arbitrary loops in the topology and disentangles
3830 * those loops by matching against (mp, ncp) rather than just (ncp).
3831 * This means any given ncp can dive any number of mounts, depending
3832 * on the relative mount (e.g. nullfs) the caller is at in the topology.
3834 * We use a very simple frontend cache to reduce SMP conflicts,
3835 * which we have to do because the mountlist scan needs an exclusive
3836 * lock around its ripout info list. Not to mention that there might
3837 * be a lot of mounts.
3839 * Because all mounts can potentially be accessed by all cpus, break the cpus
3840 * down a bit to allow some contention rather than making the cache
3843 * The hash table is split into per-cpu areas and is 4-way set-associative.
3845 struct findmount_info {
3846 struct mount *result;
3847 struct mount *nch_mount;
3848 struct namecache *nch_ncp;
3852 struct ncmount_cache *
3853 ncmount_cache_lookup4(struct mount *mp, struct namecache *ncp)
3857 hash = iscsi_crc32(&mp, sizeof(mp));
3858 hash = iscsi_crc32_ext(&ncp, sizeof(ncp), hash);
3860 hash = hash & ((NCMOUNT_NUMCACHE - 1) & ~(NCMOUNT_SET - 1));
3862 return (&ncmount_cache[hash]);
3866 struct ncmount_cache *
3867 ncmount_cache_lookup(struct mount *mp, struct namecache *ncp)
3869 struct ncmount_cache *ncc;
3870 struct ncmount_cache *best;
3875 ncc = ncmount_cache_lookup4(mp, ncp);
3878 * NOTE: When checking for a ticks overflow implement a slop of
3879 * 2 ticks just to be safe, because ticks is accessed
3880 * non-atomically; one CPU can increment it while another
3881 * is still using the old value.
3883 if (ncc->ncp == ncp && ncc->mp == mp) /* 0 */
3885 delta = (int)(ticks - ncc->ticks); /* beware GCC opts */
3886 if (delta < -2) /* overflow reset */
3891 for (i = 1; i < NCMOUNT_SET; ++i) { /* 1, 2, 3 */
3893 if (ncc->ncp == ncp && ncc->mp == mp)
3895 delta = (int)(ticks - ncc->ticks);
3898 if (delta > best_delta) {
3907 * pcpu-optimized mount search. Locate the recursive mountpoint, avoid
3908 * doing an expensive mountlist_scan*() if possible.
3910 * (mp, ncp) -> mountonpt.k
3912 * Returns a referenced mount pointer or NULL
3914 * General SMP operation uses a per-cpu umount_spin to interlock unmount
3915 * operations (that is, where the mp_target can be freed out from under us).
3917 * Lookups use the ncc->updating counter to validate the contents in order
3918 * to avoid having to obtain the per cache-element spin-lock. In addition,
3919 * the ticks field is only updated when it changes. However, if our per-cpu
3920 * lock fails due to an unmount-in-progress, we fall-back to the
3921 * cache-element's spin-lock.
3924 cache_findmount(struct nchandle *nch)
3926 struct findmount_info info;
3927 struct ncmount_cache *ncc;
3928 struct ncmount_cache ncc_copy;
3929 struct mount *target;
3930 struct pcpu_ncache *pcpu;
3931 struct spinlock *spinlk;
3935 if (ncmount_cache_enable == 0 || pcpu == NULL) {
3939 pcpu += mycpu->gd_cpuid;
3942 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
3943 if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
3946 * This is a bit messy for now because we do not yet have
3947 * safe disposal of mount structures. We have to ref
3948 * ncc->mp_target but the 'update' counter only tell us
3949 * whether the cache has changed after the fact.
3951 * For now get a per-cpu spinlock that will only contend
3952 * against umount's. This is the best path. If it fails,
3953 * instead of waiting on the umount we fall-back to a
3954 * shared ncc->spin lock, which will generally only cost a
3957 update = ncc->updating;
3958 if (__predict_true(spin_trylock(&pcpu->umount_spin))) {
3959 spinlk = &pcpu->umount_spin;
3961 spinlk = &ncc->spin;
3962 spin_lock_shared(spinlk);
3964 if (update & 1) { /* update in progress */
3965 spin_unlock_any(spinlk);
3970 if (ncc->updating != update) { /* content changed */
3971 spin_unlock_any(spinlk);
3974 if (ncc_copy.ncp != nch->ncp || ncc_copy.mp != nch->mount) {
3975 spin_unlock_any(spinlk);
3978 if (ncc_copy.isneg == 0) {
3979 target = ncc_copy.mp_target;
3980 if (target->mnt_ncmounton.mount == nch->mount &&
3981 target->mnt_ncmounton.ncp == nch->ncp) {
3983 * Cache hit (positive) (avoid dirtying
3984 * the cache line if possible)
3986 if (ncc->ticks != (int)ticks)
3987 ncc->ticks = (int)ticks;
3988 _cache_mntref(target);
3992 * Cache hit (negative) (avoid dirtying
3993 * the cache line if possible)
3995 if (ncc->ticks != (int)ticks)
3996 ncc->ticks = (int)ticks;
3999 spin_unlock_any(spinlk);
4009 info.nch_mount = nch->mount;
4010 info.nch_ncp = nch->ncp;
4011 mountlist_scan(cache_findmount_callback, &info,
4012 MNTSCAN_FORWARD | MNTSCAN_NOBUSY | MNTSCAN_NOUNLOCK);
4015 * To reduce multi-re-entry on the cache, relookup in the cache.
4016 * This can still race, obviously, but that's ok.
4018 ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
4019 if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
4021 atomic_add_int(&info.result->mnt_refs, -1);
4028 if ((info.result == NULL ||
4029 (info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0)) {
4030 spin_lock(&ncc->spin);
4031 atomic_add_int_nonlocked(&ncc->updating, 1);
4033 KKASSERT(ncc->updating & 1);
4034 if (ncc->mp != nch->mount) {
4036 atomic_add_int(&ncc->mp->mnt_refs, -1);
4037 atomic_add_int(&nch->mount->mnt_refs, 1);
4038 ncc->mp = nch->mount;
4040 ncc->ncp = nch->ncp; /* ptr compares only, not refd*/
4041 ncc->ticks = (int)ticks;
4045 if (ncc->mp_target != info.result) {
4047 atomic_add_int(&ncc->mp_target->mnt_refs, -1);
4048 ncc->mp_target = info.result;
4049 atomic_add_int(&info.result->mnt_refs, 1);
4053 if (ncc->mp_target) {
4054 atomic_add_int(&ncc->mp_target->mnt_refs, -1);
4055 ncc->mp_target = NULL;
4059 atomic_add_int_nonlocked(&ncc->updating, 1);
4060 spin_unlock(&ncc->spin);
4062 return(info.result);
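/*
 * Illustrative sketch (not part of the original code): the mount returned by
 * cache_findmount() is referenced, so a caller pairs it with
 * cache_dropmount() once it is done crossing the mount point.
 *
 *	struct mount *mp;
 *
 *	if ((mp = cache_findmount(&nch)) != NULL) {
 *		(descend into the mounted filesystem, e.g. via
 *		 mp->mnt_ncmountpt)
 *		cache_dropmount(mp);
 *	}
 */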
4067 cache_findmount_callback(struct mount *mp, void *data)
4069 struct findmount_info *info = data;
4072 * Check the mount's mounted-on point against the passed nch.
4074 if (mp->mnt_ncmounton.mount == info->nch_mount &&
4075 mp->mnt_ncmounton.ncp == info->nch_ncp
4085 cache_dropmount(struct mount *mp)
4091 * mp is being mounted, scrap entries matching mp->mnt_ncmounton (positive
4094 * A full scan is not required, but for now just do it anyway.
4097 cache_ismounting(struct mount *mp)
4099 struct ncmount_cache *ncc;
4100 struct mount *ncc_mp;
4103 if (pcpu_ncache == NULL)
4106 for (i = 0; i < NCMOUNT_NUMCACHE; ++i) {
4107 ncc = &ncmount_cache[i];
4108 if (ncc->mp != mp->mnt_ncmounton.mount ||
4109 ncc->ncp != mp->mnt_ncmounton.ncp) {
4112 spin_lock(&ncc->spin);
4113 atomic_add_int_nonlocked(&ncc->updating, 1);
4115 KKASSERT(ncc->updating & 1);
4116 if (ncc->mp != mp->mnt_ncmounton.mount ||
4117 ncc->ncp != mp->mnt_ncmounton.ncp) {
4120 spin_unlock(&ncc->spin);
4127 atomic_add_int(&ncc_mp->mnt_refs, -1);
4128 ncc_mp = ncc->mp_target;
4129 ncc->mp_target = NULL;
4131 atomic_add_int(&ncc_mp->mnt_refs, -1);
4132 ncc->ticks = (int)ticks - hz * 120;
4135 atomic_add_int_nonlocked(&ncc->updating, 1);
4136 spin_unlock(&ncc->spin);
4140 * Pre-cache the mount point
4142 ncc = ncmount_cache_lookup(mp->mnt_ncmounton.mount,
4143 mp->mnt_ncmounton.ncp);
4145 spin_lock(&ncc->spin);
4146 atomic_add_int_nonlocked(&ncc->updating, 1);
4148 KKASSERT(ncc->updating & 1);
4151 atomic_add_int(&ncc->mp->mnt_refs, -1);
4152 atomic_add_int(&mp->mnt_ncmounton.mount->mnt_refs, 1);
4153 ncc->mp = mp->mnt_ncmounton.mount;
4154 ncc->ncp = mp->mnt_ncmounton.ncp; /* ptr compares only */
4155 ncc->ticks = (int)ticks;
4158 if (ncc->mp_target != mp) {
4160 atomic_add_int(&ncc->mp_target->mnt_refs, -1);
4161 ncc->mp_target = mp;
4162 atomic_add_int(&mp->mnt_refs, 1);
4165 atomic_add_int_nonlocked(&ncc->updating, 1);
4166 spin_unlock(&ncc->spin);
4170 * Scrap any ncmount_cache entries related to mp. Not only do we need to
4171 * scrap entries matching mp->mnt_ncmounton, but we also need to scrap any
4172 * negative hits involving (mp, <any>).
4174 * A full scan is required.
4177 cache_unmounting(struct mount *mp)
4179 struct ncmount_cache *ncc;
4180 struct pcpu_ncache *pcpu;
4181 struct mount *ncc_mp;
4188 for (i = 0; i < ncpus; ++i)
4189 spin_lock(&pcpu[i].umount_spin);
4191 for (i = 0; i < NCMOUNT_NUMCACHE; ++i) {
4192 ncc = &ncmount_cache[i];
4193 if (ncc->mp != mp && ncc->mp_target != mp)
4195 spin_lock(&ncc->spin);
4196 atomic_add_int_nonlocked(&ncc->updating, 1);
4199 if (ncc->mp != mp && ncc->mp_target != mp) {
4200 atomic_add_int_nonlocked(&ncc->updating, 1);
4202 spin_unlock(&ncc->spin);
4209 atomic_add_int(&ncc_mp->mnt_refs, -1);
4210 ncc_mp = ncc->mp_target;
4211 ncc->mp_target = NULL;
4213 atomic_add_int(&ncc_mp->mnt_refs, -1);
4214 ncc->ticks = (int)ticks - hz * 120;
4217 atomic_add_int_nonlocked(&ncc->updating, 1);
4218 spin_unlock(&ncc->spin);
4221 for (i = 0; i < ncpus; ++i)
4222 spin_unlock(&pcpu[i].umount_spin);
4226 * Resolve an unresolved namecache entry, generally by looking it up.
4227 * The passed ncp must be locked and refd.
4229 * Theoretically since a vnode cannot be recycled while held, and since
4230 * the nc_parent chain holds its vnode as long as children exist, the
4231 * direct parent of the cache entry we are trying to resolve should
4232 * have a valid vnode. If not then generate an error that we can
4233 * determine is related to a resolver bug.
4235 * However, if a vnode was in the middle of being recycled when the NCP
4236 * got locked, ncp->nc_vp might point to a vnode that is about to become
4237 * invalid. cache_resolve() handles this case by unresolving the entry
4238 * and then re-resolving it.
4240 * Note that successful resolution does not necessarily return an error
4241 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
4245 cache_resolve(struct nchandle *nch, struct ucred *cred)
4247 struct namecache *par_tmp;
4248 struct namecache *par;
4249 struct namecache *ncp;
4250 struct nchandle nctmp;
4257 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
4261 * If the ncp is already resolved we have nothing to do. However,
4262 * we do want to guarantee that a usable vnode is returned when
4263 * a vnode is present, so make sure it hasn't been reclaimed.
4265 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4266 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
4267 _cache_ncp_gen_enter(ncp);
4268 _cache_setunresolved(ncp, 0);
4269 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4270 _cache_ncp_gen_exit(ncp);
4271 return (ncp->nc_error);
4273 } else if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4274 return (ncp->nc_error);
4276 _cache_ncp_gen_enter(ncp);
4279 _cache_ncp_gen_enter(ncp);
4281 /* in gen_enter state */
4284 * If the ncp was destroyed it will never resolve again. This
4285 * can basically only happen when someone is chdir'd into an
4286 * empty directory which is then rmdir'd. We want to catch this
4287 * here and not dive the VFS because the VFS might actually
4288 * have a way to re-resolve the disconnected ncp, which will
4289 * result in inconsistencies in the cdir/nch for proc->p_fd.
4291 if (ncp->nc_flag & NCF_DESTROYED) {
4292 _cache_ncp_gen_exit(ncp);
4297 * Mount points need special handling because the parent does not
4298 * belong to the same filesystem as the ncp.
4300 if (ncp == mp->mnt_ncmountpt.ncp) {
4301 error = cache_resolve_mp(mp, 0);
4302 _cache_ncp_gen_exit(ncp);
4307 * We expect an unbroken chain of ncps to at least the mount point,
4308 * and even all the way to root (but this code doesn't have to go
4309 * past the mount point).
4311 if (ncp->nc_parent == NULL) {
4312 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
4313 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
4314 ncp->nc_error = EXDEV;
4315 _cache_ncp_gen_exit(ncp);
4316 return(ncp->nc_error);
4320 * The vp's of the parent directories in the chain are held via vhold()
4321 * due to the existence of the child, and should not disappear.
4322 * However, there are cases where they can disappear:
4324 * - due to filesystem I/O errors.
4325 * - due to NFS being stupid about tracking the namespace and
4326 * destroys the namespace for entire directories quite often.
4327 * - due to forced unmounts.
4328 * - due to an rmdir (parent will be marked DESTROYED)
4330 * When this occurs we have to track the chain backwards and resolve
4331 * it, looping until the resolver catches up to the current node. We
4332 * could recurse here but we might run ourselves out of kernel stack
4333 * so we do it in a more painful manner. This situation really should
4334 * not occur all that often, or if it does, it should not have to go
4335 * back too many nodes to resolve the ncp.
4337 while ((dvp = cache_dvpref(ncp)) == NULL) {
4339 * This case can occur if a process is CD'd into a
4340 * directory which is then rmdir'd. If the parent is marked
4341 * destroyed there is no point trying to resolve it.
4343 if (ncp->nc_parent->nc_flag & NCF_DESTROYED) {
4344 if (ncvp_debug & 8) {
4345 kprintf("nc_parent destroyed: %s/%s\n",
4346 ncp->nc_parent->nc_name, ncp->nc_name);
4348 _cache_ncp_gen_exit(ncp);
4351 par = ncp->nc_parent;
4354 while ((par_tmp = par->nc_parent) != NULL &&
4355 par_tmp->nc_vp == NULL) {
4356 _cache_hold(par_tmp);
4357 _cache_lock(par_tmp);
4361 if (par->nc_parent == NULL) {
4362 kprintf("EXDEV case 2 %*.*s\n",
4363 par->nc_nlen, par->nc_nlen, par->nc_name);
4365 _cache_ncp_gen_exit(ncp);
4369 * The parent is not set in stone, ref and lock it to prevent
4370 * it from disappearing. Also note that due to renames it
4371 * is possible for our ncp to move and for par to no longer
4372 * be one of its parents. We resolve it anyway, the loop
4373 * will handle any moves.
4375 _cache_get(par); /* additional hold/lock */
4376 _cache_put(par); /* from earlier hold/lock */
4377 if (par == nch->mount->mnt_ncmountpt.ncp) {
4378 cache_resolve_mp(nch->mount, 0);
4379 } else if ((dvp = cache_dvpref(par)) == NULL) {
4380 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
4381 par->nc_nlen, par->nc_nlen, par->nc_name);
4385 if (par->nc_flag & NCF_UNRESOLVED) {
4388 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
4392 if ((error = par->nc_error) != 0) {
4393 if (par->nc_error != EAGAIN) {
4394 kprintf("EXDEV case 3 %*.*s error %d\n",
4395 par->nc_nlen, par->nc_nlen, par->nc_name,
4398 _cache_ncp_gen_exit(ncp);
4401 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
4402 par, par->nc_nlen, par->nc_nlen, par->nc_name);
4409 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
4410 * ncp's and reattach them. If this occurs the original ncp is marked
4411 * EAGAIN to force a relookup.
4413 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
4414 * ncp must already be resolved.
4419 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
4422 ncp->nc_error = EPERM;
4425 if (ncp->nc_error == EAGAIN) {
4426 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
4427 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
4430 _cache_ncp_gen_exit(ncp);
4432 return(ncp->nc_error);
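/*
 * Illustrative sketch (not part of the original code): as noted above, a
 * "successful" resolution may still return ENOENT for a negative cache hit,
 * so a caller (with the nch referenced and exclusively locked) distinguishes
 * that case from hard resolver errors.
 *
 *	error = cache_resolve(&nch, cred);
 *	if (error == 0) {
 *		(positive hit, nch.ncp->nc_vp is valid)
 *	} else if (error == ENOENT) {
 *		(negative hit, the name is known not to exist)
 *	} else {
 *		(resolver failure, e.g. EXDEV on a broken parent chain)
 *	}
 */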
4436 * Resolve the ncp associated with a mount point. Such ncp's almost always
4437 * remain resolved and this routine is rarely called. NFS MPs tend to force
4438 * re-resolution more often due to their mack-truck-smash-the-namecache
4439 * method of tracking namespace changes.
4441 * The semantics for this call is that the passed ncp must be locked on
4442 * entry and will be locked on return. However, if we actually have to
4443 * resolve the mount point we temporarily unlock the entry in order to
4444 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
4445 * the unlock we have to recheck the flags after we relock.
4448 cache_resolve_mp(struct mount *mp, int adjgen)
4450 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
4454 KKASSERT(mp != NULL);
4457 * If the ncp is already resolved we have nothing to do. However,
4458 * we do want to guarantee that a usable vnode is returned when
4459 * a vnode is present, so make sure it hasn't been reclaimed.
4461 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
4462 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
4463 _cache_setunresolved(ncp, adjgen);
4466 if (ncp->nc_flag & NCF_UNRESOLVED) {
4468 * ncp must be unlocked across the vfs_busy(), but
4469 * once busied, lock ordering is ncp(s), then vnodes,
4470 * so we must relock the ncp before issuing the VFS_ROOT().
4473 while (vfs_busy(mp, 0))
4476 error = VFS_ROOT(mp, &vp);
4479 * recheck the ncp state after relocking.
4481 if (ncp->nc_flag & NCF_UNRESOLVED) {
4482 ncp->nc_error = error;
4484 _cache_setvp(mp, ncp, vp, adjgen);
4487 kprintf("[diagnostic] cache_resolve_mp: failed"
4488 " to resolve mount %p err=%d ncp=%p\n",
4490 _cache_setvp(mp, ncp, NULL, adjgen);
4492 } else if (error == 0) {
4497 return(ncp->nc_error);
4501 * Resolve the parent vnode
4504 cache_resolve_dvp(struct nchandle *nch, struct ucred *cred, struct vnode **dvpp)
4506 struct namecache *par_tmp;
4507 struct namecache *par;
4508 struct namecache *ncp;
4509 struct nchandle nctmp;
4517 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
4520 * Treat this as a mount point even if it has a parent (e.g.
4521 * null-mount). Return a NULL dvp and no error.
4523 if (ncp == mp->mnt_ncmountpt.ncp)
4527 * If the ncp was destroyed there is no parent directory, return
4530 if (ncp->nc_flag & NCF_DESTROYED)
4534 * No parent if at the root of a filesystem, no error. Typically
4535 * not applicable to null-mounts. This case should have been caught
4536 * in the above ncmountpt check.
4538 if (ncp->nc_parent == NULL)
4542 * Resolve the parent dvp.
4544 * The vp's of the parent directories in the chain are held via vhold()
4545 * due to the existence of the child, and should not disappear.
4546 * However, there are cases where they can disappear:
4548 * - due to filesystem I/O errors.
4549 * - due to NFS being stupid about tracking the namespace and
4550 * destroys the namespace for entire directories quite often.
4551 * - due to forced unmounts.
4552 * - due to an rmdir (parent will be marked DESTROYED)
4554 * When this occurs we have to track the chain backwards and resolve
4555 * it, looping until the resolver catches up to the current node. We
4556 * could recurse here but we might run ourselves out of kernel stack
4557 * so we do it in a more painful manner. This situation really should
4558 * not occur all that often, or if it does, it should not have to go
4559 * back too many nodes to resolve the ncp.
4561 while ((dvp = cache_dvpref(ncp)) == NULL) {
4563 * This case can occur if a process is CD'd into a
4564 * directory which is then rmdir'd. If the parent is marked
4565 * destroyed there is no point trying to resolve it.
4567 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
4569 par = ncp->nc_parent;
4572 while ((par_tmp = par->nc_parent) != NULL &&
4573 par_tmp->nc_vp == NULL) {
4574 _cache_hold(par_tmp);
4575 _cache_lock(par_tmp);
4579 if (par->nc_parent == NULL) {
4580 kprintf("EXDEV case 2 %*.*s\n",
4581 par->nc_nlen, par->nc_nlen, par->nc_name);
4587 * The parent is not set in stone, ref and lock it to prevent
4588 * it from disappearing. Also note that due to renames it
4589 * is possible for our ncp to move and for par to no longer
4590 * be one of its parents. We resolve it anyway, the loop
4591 * will handle any moves.
4593 _cache_get(par); /* additional hold/lock */
4594 _cache_put(par); /* from earlier hold/lock */
4595 if (par == nch->mount->mnt_ncmountpt.ncp) {
4596 cache_resolve_mp(nch->mount, 1);
4597 } else if ((dvp = cache_dvpref(par)) == NULL) {
4598 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
4599 par->nc_nlen, par->nc_nlen, par->nc_name);
4603 if (par->nc_flag & NCF_UNRESOLVED) {
4606 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
4610 if ((error = par->nc_error) != 0) {
4611 if (par->nc_error != EAGAIN) {
4612 kprintf("EXDEV case 3 %*.*s error %d\n",
4613 par->nc_nlen, par->nc_nlen, par->nc_name,
4618 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
4619 par, par->nc_nlen, par->nc_nlen, par->nc_name);
4626 * We have a referenced dvp
4633 * Clean out negative cache entries when too many have accumulated.
4636 _cache_cleanneg(long count)
4638 struct pcpu_ncache *pn;
4639 struct namecache *ncp;
4640 static uint32_t neg_rover;
4644 n = neg_rover++; /* SMP heuristical, race ok */
4646 n = n % (uint32_t)ncpus;
4649 * Normalize vfscache_negs and count. count is sometimes based
4650 * on vfscache_negs. vfscache_negs is heuristical and can sometimes
4651 * have crazy values.
4653 vnegs = vfscache_negs;
4655 if (vnegs <= MINNEG)
4660 pn = &pcpu_ncache[n];
4661 spin_lock(&pn->neg_spin);
4662 count = pn->neg_count * count / vnegs + 1;
4663 spin_unlock(&pn->neg_spin);
4666 * Attempt to clean out the specified number of negative cache
4670 spin_lock(&pn->neg_spin);
4671 ncp = TAILQ_FIRST(&pn->neg_list);
4673 spin_unlock(&pn->neg_spin);
4676 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
4677 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
4679 spin_unlock(&pn->neg_spin);
4682 * This can race, so we must re-check that the ncp
4683 * is on the ncneg.list after successfully locking it.
4685 * Don't scrap actively referenced ncps. There should be
4686 * 3 refs. The natural ref, one from being on the neg list,
4689 * Recheck fields after successfully locking to ensure
4690 * that it is in-fact still on the negative list with no
4693 * WARNING! On the ncneglist scan any race against other
4694 * destructors (zaps or cache_inval_vp_quick() calls)
4695 * will have already unresolved the ncp and cause
4696 * us to drop instead of zap. This is fine; if
4697 * our drop winds up being the last one it will
4700 if (_cache_lock_special(ncp) == 0) {
4701 if (ncp->nc_vp == NULL &&
4702 ncp->nc_refs == 3 &&
4703 (ncp->nc_flag & NCF_UNRESOLVED) == 0)
4705 ++pcpu_ncache[mycpu->gd_cpuid].clean_neg_count;
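/*
 * Worked example (not part of the original code) of the per-cpu
 * normalization above: if the global heuristic vfscache_negs is 8000 and the
 * caller asked to clean 400 entries, a cpu whose local neg_count is 2000
 * scales that to 2000 * 400 / 8000 + 1 = 101 entries, so each cpu cleans
 * roughly its share of the global target (the +1 guarantees forward
 * progress).  All values are purely for illustration.
 */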
/*
 * Clean out unresolved cache entries when too many have accumulated.
 * Resolved cache entries are cleaned out via the vnode reclamation
 * mechanism and by _cache_cleanneg().
 */
_cache_cleanpos(long ucount, long xcount)
	static volatile int rover;
	struct nchash_head *nchpp;
	struct namecache *ncp;
	/*
	 * Don't burn too much cpu looking for stuff
	 */
	count = (ucount > xcount) ? ucount : xcount;
	/*
	 * Attempt to clean out the specified number of cache entries.
	 */
	while (count > 0 && (ucount > 0 || xcount > 0)) {
		rover_copy = ++rover;	/* MPSAFEENOUGH */
		nchpp = NCHHASH(rover_copy);
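		/*
		 * Illustrative note (not in the original source): the shared
		 * rover is bumped on every iteration and mapped to a hash
		 * bucket, so successive passes (and racing cpus) walk the
		 * hash chains in a rough round-robin instead of hammering a
		 * single chain. A race on rover is harmless because any
		 * bucket is an acceptable starting point.
		 */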
		if (TAILQ_FIRST(&nchpp->list) == NULL) {
		spin_lock(&nchpp->spin);
		ncp = TAILQ_FIRST(&nchpp->list);
		/*
		 * Skip placeholder ncp's. Do not shift their
		 * position in the list.
		 */
		while (ncp && (ncp->nc_flag & NCF_DUMMY))
			ncp = TAILQ_NEXT(ncp, nc_hash);
		/*
		 * Move to end of list
		 */
		TAILQ_REMOVE(&nchpp->list, ncp, nc_hash);
		TAILQ_INSERT_TAIL(&nchpp->list, ncp, nc_hash);
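		/*
		 * Illustrative note (not in the original source): rotating
		 * the examined ncp to the tail of its hash chain means the
		 * next pass over this bucket starts with a different entry,
		 * so repeated cleaning passes cycle through the chain rather
		 * than repeatedly testing the same front entry.
		 */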
		if (ncp->nc_refs != ncpbaserefs(ncp)) {
			/*
			 * Do not destroy internal nodes that have
			 * children or nodes which have thread
			 */
		} else if (ucount > 0 &&
			   (ncp->nc_flag & NCF_UNRESOLVED))
			/*
			 * Destroy unresolved nodes if asked.
			 */
		} else if (xcount > 0) {
			/*
			 * Destroy any other node if asked.
			 */
		spin_unlock(&nchpp->spin);
		/*
		 * Try to scrap the ncp if we can do so non-blocking.
		 * We must re-check nc_refs after locking, and it will
		 * have one additional ref from above.
		 */
		if (_cache_lock_special(ncp) == 0) {
			if (ncp->nc_refs == 1 + ncpbaserefs(ncp)) {
				++pcpu_ncache[mycpu->gd_cpuid].
/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
_cache_cleandefered(void)
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	/*
	 * Create a list iterator. DUMMY indicates that this is a list
	 * iterator, DESTROYED prevents matches by lookup functions.
	 */
	pcpu_ncache[mycpu->gd_cpuid].numdefered = 0;
	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED | NCF_DUMMY;
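	/*
	 * Illustrative note (not in the original source): the dummy entry
	 * acts as a persistent cursor. It is inserted at the head of each
	 * hash chain and, once a real ncp has been examined, re-inserted
	 * just past that ncp, so the chain spinlock can be dropped while
	 * the ncp is locked and its deferred zap is processed, and the
	 * scan then resumes from the cursor's position.
	 */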
	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];
		spin_lock(&nchpp->spin);
		TAILQ_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = TAILQ_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
			TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
			TAILQ_INSERT_AFTER(&nchpp->list, ncp, &dummy, nc_hash);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
			spin_lock(&nchpp->spin);
		TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
		spin_unlock(&nchpp->spin);
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
	struct pcpu_ncache *pn;
	/*
	 * Per-cpu accounting and negative hit list
	 */
	pcpu_ncache = kmalloc(sizeof(*pcpu_ncache) * ncpus,
			      M_VFSCACHEAUX, M_WAITOK|M_ZERO);
	for (i = 0; i < ncpus; ++i) {
		pn = &pcpu_ncache[i];
		TAILQ_INIT(&pn->neg_list);
		spin_init(&pn->neg_spin, "ncneg");
		spin_init(&pn->umount_spin, "ncumm");
	/*
	 * Initialise per-cpu namecache effectiveness statistics.
	 */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	/*
	 * Create a generous namecache hash table
	 */
	nchashtbl = hashinit_ext(vfs_inodehashsize(),
				 sizeof(struct nchash_head),
				 M_VFSCACHEAUX, &nchash);
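	/*
	 * Illustrative note (not in the original source): hashinit_ext()
	 * sizes the table to a power of two and stores the table size
	 * minus one in nchash, so nchash doubles as the mask used when
	 * hashing a name to a bucket; that is also why the bucket loops
	 * here and in _cache_cleandefered() run with "i <= nchash".
	 */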
	for (i = 0; i <= (int)nchash; ++i) {
		TAILQ_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin, "nchinit_hash");
	for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
		spin_init(&ncmount_cache[i].spin, "nchinit_cache");
	nclockwarn = 5 * hz;
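	/*
	 * Illustrative note (not in the original source): nclockwarn is in
	 * ticks, so 5 * hz corresponds to roughly five seconds; it is the
	 * threshold used before printing diagnostics about a namecache
	 * lock that has been blocked for an unusually long time.
	 */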
/*
 * Called from start_init() to bootstrap the root filesystem. Returns
 * a referenced, unlocked namecache record to serve as a root or the
 * root of the system.
 *
 * Adjust our namecache counts
 */
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
	struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
	atomic_add_long(&pn->vfscache_leafs, 1);
	atomic_add_long(&pn->vfscache_unres, 1);
	nch->ncp = cache_alloc(0);
	_cache_setvp(nch->mount, nch->ncp, vp, 1);
/*
 * vfs_cache_setroot()
 *
 * Create an association between the root of our namecache and
 * the root vnode. This routine may be called several times during
 *
 * If the caller intends to save the returned namecache pointer somewhere
 * it must cache_hold() it.
 */
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
	struct nchandle onch;
	cache_zero(&rootnch);
/*
 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
 * topology and is being removed as quickly as possible. The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache. This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children. The namecache topology is left intact even if we do not
 * know what the vnode association is. Such entries will be marked
 */
cache_purge(struct vnode *vp)
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
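/*
 * Illustrative note (not in the original source): this is the blunt
 * instrument typically reached for when a vnode is being torn down or a
 * filesystem cannot make the precise VOP_N*() adjustments described
 * above; it simply severs the vnode's namecache linkage and invalidates
 * direct children via cache_inval_vp().
 */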
__read_mostly static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
sys___getcwd(struct sysmsg *sysmsg, const struct __getcwd_args *uap)
	buflen = uap->buflen;
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;
	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	error = copyout(bp, uap->buf, strlen(bp) + 1);
kern_getcwd(char *buf, size_t buflen, int *error)
	struct proc *p = curproc;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;
	nch = fdp->fd_ncdir;
	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
		if (ncp->nc_flag & NCF_DESTROYED) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			*--bp = ncp->nc_name[i];
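		/*
		 * Illustrative example (not in the original source): the
		 * path is assembled backwards from the end of the buffer,
		 * one component per pass. For a cwd of /usr/local the
		 * first pass copies "local" in reverse and a '/' is
		 * prepended, giving "/local"; the next pass yields
		 * "/usr/local". bp always points at the first valid
		 * character of the partially built string.
		 */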
		/*
		 * Go up a directory. This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			if (ncp_shared_lock_disable)
			_cache_lock_shared(ncp);
			if (nch.ncp != ncp->nc_parent) {
			_cache_hold(nch.ncp);
	if (!slash_prefixed) {
/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
__read_mostly static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	   &disablefullpath, 0,
	   "Disable fullpath lookups");
cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
	       char **retbuf, char **freebuf, int guess)
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	fd_nrdir = *nchbase;
	fd_nrdir = p->p_fd->fd_nrdir;
	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		/*
		 * If we are asked to guess the upwards path, we do so whenever
		 * we encounter an ncp marked as a mountpoint. We try to find
		 * the actual mountpoint by finding the mountpoint with this
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
			new_mp = mount_get_by_nc(ncp);
			/*
			 * While traversing upwards if we encounter the root
			 * of the current mount we have to skip to the mount point.
			 */
			if (ncp == mp->mnt_ncmountpt.ncp) {
				nch = new_mp->mnt_ncmounton;
		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			*--bp = ncp->nc_name[i];
		/*
		 * Go up a directory. This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock_shared(ncp);
			if (nch.ncp != ncp->nc_parent) {
			_cache_hold(nch.ncp);
	if (!slash_prefixed) {
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
	    char **freebuf, int guess)
	struct namecache *ncp;
	struct nchandle nch;
	if (disablefullpath)
	/* vn is NULL, client wants us to use p->p_textvp */
	if ((vn = p->p_textvp) == NULL)
	spin_lock_shared(&vn->v_spin);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		spin_unlock_shared(&vn->v_spin);
	spin_unlock_shared(&vn->v_spin);
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
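	/*
	 * Illustrative note (not in the original source): on success
	 * *retbuf points into the buffer returned via *freebuf, so the
	 * caller is expected to kfree(*freebuf, M_TEMP) once it is done
	 * with the path string.
	 */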
vfscache_rollup_cpu(struct globaldata *gd)
	struct pcpu_ncache *pn;
	if (pcpu_ncache == NULL)
	pn = &pcpu_ncache[gd->gd_cpuid];
	/*
	 * namecache statistics
	 */
	if (pn->vfscache_count) {
		count = atomic_swap_long(&pn->vfscache_count, 0);
		atomic_add_long(&vfscache_count, count);
	if (pn->vfscache_leafs) {
		count = atomic_swap_long(&pn->vfscache_leafs, 0);
		atomic_add_long(&vfscache_leafs, count);
	if (pn->vfscache_unres) {
		count = atomic_swap_long(&pn->vfscache_unres, 0);
		atomic_add_long(&vfscache_unres, count);
	if (pn->vfscache_negs) {
		count = atomic_swap_long(&pn->vfscache_negs, 0);
		atomic_add_long(&vfscache_negs, count);
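	/*
	 * Illustrative note (not in the original source): each per-cpu
	 * counter is atomically swapped to zero and its old value folded
	 * into the matching global, so hot-path updates stay cpu-local
	 * and the global totals are only approximate between rollups.
	 */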
	/*
	 * hysteresis based cleanings
	 */
	if (pn->inv_kid_quick_count) {
		count = atomic_swap_long(&pn->inv_kid_quick_count, 0);
		atomic_add_long(&inv_kid_quick_count, count);
	if (pn->inv_ncp_quick_count) {
		count = atomic_swap_long(&pn->inv_ncp_quick_count, 0);
		atomic_add_long(&inv_ncp_quick_count, count);
	if (pn->clean_pos_count) {
		count = atomic_swap_long(&pn->clean_pos_count, 0);
		atomic_add_long(&clean_pos_count, count);
	if (pn->clean_neg_count) {
		count = atomic_swap_long(&pn->clean_neg_count, 0);
		atomic_add_long(&clean_neg_count, count);
	if (pn->numdefered) {
		count = atomic_swap_long(&pn->numdefered, 0);
		atomic_add_long(&numdefered, count);