sys/kern/vfs_cache.c (dragonfly.git)
984263bc 1/*
2247fe02 2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
36 *
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 */
68
69#include <sys/param.h>
70#include <sys/systm.h>
71#include <sys/kernel.h>
72#include <sys/sysctl.h>
73#include <sys/mount.h>
74#include <sys/vnode.h>
75#include <sys/malloc.h>
76#include <sys/sysproto.h>
f63911bf 77#include <sys/spinlock.h>
984263bc 78#include <sys/proc.h>
dadab5e9 79#include <sys/namei.h>
690a3127 80#include <sys/nlookup.h>
81#include <sys/filedesc.h>
82#include <sys/fnv_hash.h>
24e51f36 83#include <sys/globaldata.h>
63f58b90 84#include <sys/kern_syscall.h>
fad57d0e 85#include <sys/dirent.h>
8c361dda 86#include <ddb/ddb.h>
984263bc 87
3c37c940 88#include <sys/sysref2.h>
f63911bf 89#include <sys/spinlock2.h>
684a93c4 90#include <sys/mplock2.h>
3c37c940 91
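/*
 * Cap on recursive namecache invalidation depth; deeper subtrees are
 * deferred and restarted iteratively (see the cinvtrack handling in
 * _cache_inval() below).
 */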
92#define MAX_RECURSION_DEPTH 64
93
984263bc 94/*
7ea21ed1 95 * Random lookups in the cache are accomplished with a hash table using
96 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock.
97 *
98 * Negative entries may exist and correspond to resolved namecache
99 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT
100 * will be set if the entry corresponds to a whited-out directory entry
101 * (versus simply not finding the entry at all). ncneglist is locked
102 * with a global spinlock (ncspin).
103 *
104 * MPSAFE RULES:
105 *
106 * (1) A ncp must be referenced before it can be locked.
107 *
108 * (2) A ncp must be locked in order to modify it.
109 *
110 * (3) ncp locks are always ordered child -> parent. That may seem
111 * backwards but forward scans use the hash table and thus can hold
112 * the parent unlocked when traversing downward.
984263bc 113 *
2247fe02
MD
114 * This allows insert/rename/delete/dot-dot and other operations
115 * to use ncp->nc_parent links.
984263bc 116 *
2247fe02
MD
117 * This also prevents a locked up e.g. NFS node from creating a
118 * chain reaction all the way back to the root vnode / namecache.
119 *
120 * (4) parent linkages require both the parent and child to be locked.
121 */
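/*
 * Illustrative sketch only (not part of the original source): a consumer
 * holding an nchandle follows the rules above by taking a reference
 * before the lock and releasing in the reverse order, e.g.:
 *
 *	cache_hold(&nch);		rule (1): ref before lock
 *	cache_lock(&nch);		rule (2): lock before modifying
 *	...resolve / modify the ncp...
 *	cache_unlock(&nch);
 *	cache_drop(&nch);		or cache_put(&nch) to do both
 */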
122
123/*
124 * Structures associated with name caching.
125 */
8987aad7 126#define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
f517a1bb 127#define MINNEG 1024
9e10d70b 128#define MINPOS 1024
8987aad7 129
130MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
131
132LIST_HEAD(nchash_list, namecache);
133
134struct nchash_head {
135 struct nchash_list list;
136 struct spinlock spin;
137};
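/*
 * Each hash bucket carries its own spinlock so separate chains can be
 * searched and modified concurrently; NCHHASH() selects a bucket by
 * masking the FNV hash of the name with nchash.
 */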
138
139static struct nchash_head *nchashtbl;
140static struct namecache_list ncneglist;
141static struct spinlock ncspin;
8987aad7 142
143/*
144 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server
145 * to create the namecache infrastructure leading to a dangling vnode.
146 *
147 * 0 Only errors are reported
148 * 1 Successes are reported
149 * 2 Successes + the whole directory scan is reported
150 * 3 Force the directory scan code to run as if the parent vnode did not
151 * have a namecache record, even if it does have one.
152 */
153static int ncvp_debug;
154SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
155 "Namecache debug level (0-3)");
fad57d0e 156
984263bc 157static u_long nchash; /* size of hash table */
158SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
159 "Size of namecache hash table");
8987aad7 160
f63911bf 161static int ncnegfactor = 16; /* ratio of negative entries */
162SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
163 "Ratio of namecache negative entries");
8987aad7 164
fc21741a 165static int nclockwarn; /* warn on locked entries in ticks */
166SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
167 "Warn on locked namecache entries in ticks");
fc21741a 168
65870584 169static int numdefered; /* number of deferred zaps */
170SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
171 "Number of cache entries allocated");
65870584 172
9e10d70b 173static int ncposlimit; /* limit on the number of positive entries */
174SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
175 "Number of cache entries allocated");
9e10d70b 176
177SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
178 "sizeof(struct vnode)");
179SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
180 "sizeof(struct namecache)");
984263bc 181
2f3ef9b4 182int cache_mpsafe = 1;
183SYSCTL_INT(_vfs, OID_AUTO, cache_mpsafe, CTLFLAG_RW, &cache_mpsafe, 0, "Enable MPSAFE namecache operations");
184
28623bf9 185static int cache_resolve_mp(struct mount *mp);
5312fa43 186static struct vnode *cache_dvpref(struct namecache *ncp);
187static void _cache_lock(struct namecache *ncp);
188static void _cache_setunresolved(struct namecache *ncp);
65870584 189static void _cache_cleanneg(int count);
9e10d70b 190static void _cache_cleanpos(int count);
65870584 191static void _cache_cleandefered(void);
646a1cda 192
193/*
194 * The new name cache statistics
195 */
196SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
197#define STATNODE(mode, name, var) \
198 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
199#define STATNODE_INT(mode, name, var) \
200 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
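/*
 * Each STATNODE*() invocation below registers its counter as a node under
 * the vfs.cache sysctl tree created above, e.g. numcalls appears to
 * userland as vfs.cache.numcalls.
 */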
201static int numneg; STATNODE_INT(CTLFLAG_RD, numneg, &numneg);
202static int numcache; STATNODE_INT(CTLFLAG_RD, numcache, &numcache);
203static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
204static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
205static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
206static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
207static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
208static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
209static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
210static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
211static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
212static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
213
214struct nchstats nchstats[SMP_MAXCPU];
215/*
216 * Export VFS cache effectiveness statistics to user-land.
217 *
218 * The statistics are left for aggregation to user-land so
219 * neat things can be achieved, like observing per-CPU cache
220 * distribution.
221 */
222static int
3736bb9b 223sysctl_nchstats(SYSCTL_HANDLER_ARGS)
224{
225 struct globaldata *gd;
226 int i, error;
227
228 error = 0;
229 for (i = 0; i < ncpus; ++i) {
230 gd = globaldata_find(i);
231 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
232 sizeof(struct nchstats))))
233 break;
234 }
984263bc 235
236 return (error);
237}
238SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
3736bb9b 239 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
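/*
 * Illustrative only: a userland consumer reads the whole per-cpu array in
 * one request and aggregates it itself, e.g.:
 *
 *	size_t len = 0;
 *	sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0);
 *	struct nchstats *stats = malloc(len);
 *	sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0);
 *	int ncpus_seen = len / sizeof(*stats);
 */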
984263bc 240
65870584 241static struct namecache *cache_zap(struct namecache *ncp, int nonblock);
242
243/*
244 * Namespace locking. The caller must already hold a reference to the
245 * namecache structure in order to lock/unlock it. This function prevents
246 * the namespace from being created or destroyed by accessors other than
247 * the lock holder.
248 *
249 * Note that holding a locked namecache structure prevents other threads
250 * from making namespace changes (e.g. deleting or creating), prevents
251 * vnode association state changes by other threads, and prevents the
252 * namecache entry from being resolved or unresolved by other threads.
253 *
254 * The lock owner has full authority to associate/disassociate vnodes
255 * and resolve/unresolve the locked ncp.
256 *
257 * The primary lock field is nc_exlocks. nc_locktd is set after the
258 * fact (when locking) or cleared prior to unlocking.
259 *
260 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
261 * or recycled, but it does NOT help you if the vnode had already
262 * initiated a recyclement. If this is important, use cache_get()
263 * rather than cache_lock() (and deal with the differences in the
264 * way the refs counter is handled). Or, alternatively, make an
265 * unconditional call to cache_validate() or cache_resolve()
266 * after cache_lock() returns.
267 *
268 * MPSAFE
269 */
270static
271void
272_cache_lock(struct namecache *ncp)
273{
274 thread_t td;
f63911bf
MD
275 int didwarn;
276 int error;
2247fe02 277 u_int count;
f63911bf
MD
278
279 KKASSERT(ncp->nc_refs != 0);
280 didwarn = 0;
281 td = curthread;
282
283 for (;;) {
2247fe02 284 count = ncp->nc_exlocks;
f63911bf 285
2247fe02
MD
286 if (count == 0) {
287 if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
f63911bf
MD
288 /*
289 * The vp associated with a locked ncp must
290 * be held to prevent it from being recycled.
291 *
292 * WARNING! If VRECLAIMED is set the vnode
293 * could already be in the middle of a recycle.
294 * Callers must use cache_vref() or
295 * cache_vget() on the locked ncp to
296 * validate the vp or set the cache entry
297 * to unresolved.
2247fe02
MD
298 *
299 * NOTE! vhold() is allowed if we hold a
300 * lock on the ncp (which we do).
f63911bf 301 */
2247fe02 302 ncp->nc_locktd = td;
f63911bf
MD
303 if (ncp->nc_vp)
304 vhold(ncp->nc_vp); /* MPSAFE */
305 break;
306 }
2247fe02
MD
307 /* cmpset failed */
308 continue;
309 }
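		/*
		 * Lock already owned by this thread: a recursive
		 * acquisition, so just bump the count (any pending
		 * NC_EXLOCK_REQ bit is carried along in 'count').
		 */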
310 if (ncp->nc_locktd == td) {
311 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
312 count + 1)) {
313 break;
314 }
315 /* cmpset failed */
f63911bf
MD
316 continue;
317 }
f63911bf 318 tsleep_interlock(ncp, 0);
2247fe02
MD
319 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
320 count | NC_EXLOCK_REQ) == 0) {
321 /* cmpset failed */
f63911bf 322 continue;
2247fe02 323 }
f63911bf
MD
324 error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
325 if (error == EWOULDBLOCK) {
2247fe02
MD
326 if (didwarn == 0) {
327 didwarn = ticks;
328 kprintf("[diagnostic] cache_lock: blocked "
329 "on %p",
330 ncp);
331 kprintf(" \"%*.*s\"\n",
332 ncp->nc_nlen, ncp->nc_nlen,
333 ncp->nc_name);
334 }
f63911bf
MD
335 }
336 }
2247fe02
MD
337 if (didwarn) {
338 kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
339 "%d secs\n",
340 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
341 (int)(ticks - didwarn) / hz);
f63911bf
MD
342 }
343}
344
2247fe02 345/*
65870584
MD
346 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
347 * such as the case where one of its children is locked.
348 *
2247fe02
MD
349 * MPSAFE
350 */
f63911bf
MD
351static
352int
353_cache_lock_nonblock(struct namecache *ncp)
354{
355 thread_t td;
2247fe02 356 u_int count;
f63911bf 357
f63911bf
MD
358 td = curthread;
359
360 for (;;) {
2247fe02 361 count = ncp->nc_exlocks;
f63911bf 362
2247fe02
MD
363 if (count == 0) {
364 if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
f63911bf
MD
365 /*
366 * The vp associated with a locked ncp must
367 * be held to prevent it from being recycled.
368 *
369 * WARNING! If VRECLAIMED is set the vnode
370 * could already be in the middle of a recycle.
371 * Callers must use cache_vref() or
372 * cache_vget() on the locked ncp to
373 * validate the vp or set the cache entry
374 * to unresolved.
2247fe02
MD
375 *
376 * NOTE! vhold() is allowed if we hold a
377 * lock on the ncp (which we do).
f63911bf 378 */
2247fe02 379 ncp->nc_locktd = td;
f63911bf
MD
380 if (ncp->nc_vp)
381 vhold(ncp->nc_vp); /* MPSAFE */
382 break;
383 }
2247fe02
MD
384 /* cmpset failed */
385 continue;
386 }
387 if (ncp->nc_locktd == td) {
388 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
389 count + 1)) {
390 break;
391 }
392 /* cmpset failed */
f63911bf
MD
393 continue;
394 }
395 return(EWOULDBLOCK);
396 }
397 return(0);
398}
399
400/*
401 * Helper function
402 *
403 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
2247fe02 404 *
65870584 405 * nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
2247fe02
MD
406 *
407 * MPSAFE
f63911bf
MD
408 */
409static
410void
411_cache_unlock(struct namecache *ncp)
412{
413 thread_t td __debugvar = curthread;
2247fe02 414 u_int count;
f63911bf
MD
415
416 KKASSERT(ncp->nc_refs >= 0);
417 KKASSERT(ncp->nc_exlocks > 0);
418 KKASSERT(ncp->nc_locktd == td);
419
2247fe02
MD
420 count = ncp->nc_exlocks;
421 if ((count & ~NC_EXLOCK_REQ) == 1) {
422 ncp->nc_locktd = NULL;
f63911bf
MD
423 if (ncp->nc_vp)
424 vdrop(ncp->nc_vp);
2247fe02
MD
425 }
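	/*
	 * Release one recursion level atomically: on the final release
	 * clear nc_exlocks entirely and wake any NC_EXLOCK_REQ waiters,
	 * otherwise just decrement the count.
	 */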
426 for (;;) {
427 if ((count & ~NC_EXLOCK_REQ) == 1) {
428 if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
429 if (count & NC_EXLOCK_REQ)
430 wakeup(ncp);
431 break;
432 }
433 } else {
434 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
435 count - 1)) {
436 break;
437 }
f63911bf 438 }
2247fe02 439 count = ncp->nc_exlocks;
f63911bf
MD
440 }
441}
442
984263bc
MD
443
444/*
7ea21ed1
MD
445 * cache_hold() and cache_drop() prevent the premature deletion of a
446 * namecache entry but do not prevent operations (such as zapping) on
447 * that namecache entry.
5b287bba 448 *
36e90efd
MD
449 * This routine may only be called from outside this source module if
450 * nc_refs is already at least 1.
5b287bba 451 *
36e90efd
MD
452 * This is a rare case where callers are allowed to hold a spinlock,
453 * so we can't acquire one ourselves.
61f96b6f
MD
454 *
455 * MPSAFE
984263bc 456 */
7ea21ed1
MD
457static __inline
458struct namecache *
bc0c094e 459_cache_hold(struct namecache *ncp)
7ea21ed1 460{
5b287bba 461 atomic_add_int(&ncp->nc_refs, 1);
7ea21ed1
MD
462 return(ncp);
463}
464
8c361dda 465/*
f63911bf
MD
466 * Drop a cache entry, taking care to deal with races.
467 *
468 * For potential 1->0 transitions we must hold the ncp lock to safely
469 * test its flags. An unresolved entry with no children must be zapped
470 * to avoid leaks.
471 *
472 * The call to cache_zap() itself will handle all remaining races and
473 * will decrement the ncp's refs regardless. If we are resolved or
474 * have children nc_refs can safely be dropped to 0 without having to
475 * zap the entry.
476 *
477 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
478 *
479 * NOTE: cache_zap() may return a non-NULL referenced parent which must
480 * be dropped in a loop.
2247fe02
MD
481 *
482 * MPSAFE
8c361dda 483 */
7ea21ed1
MD
484static __inline
485void
bc0c094e 486_cache_drop(struct namecache *ncp)
7ea21ed1 487{
f63911bf
MD
488 int refs;
489
490 while (ncp) {
491 KKASSERT(ncp->nc_refs > 0);
492 refs = ncp->nc_refs;
493
494 if (refs == 1) {
495 if (_cache_lock_nonblock(ncp) == 0) {
055f5cc8 496 ncp->nc_flag &= ~NCF_DEFEREDZAP;
f63911bf
MD
497 if ((ncp->nc_flag & NCF_UNRESOLVED) &&
498 TAILQ_EMPTY(&ncp->nc_list)) {
65870584 499 ncp = cache_zap(ncp, 1);
f63911bf
MD
500 continue;
501 }
502 if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
503 _cache_unlock(ncp);
504 break;
505 }
506 _cache_unlock(ncp);
507 }
508 } else {
509 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
510 break;
511 }
2247fe02 512 cpu_pause();
f517a1bb 513 }
7ea21ed1 514}
8987aad7 515
690a3127 516/*
2247fe02
MD
517 * Link a new namecache entry to its parent and to the hash table. Be
518 * careful to avoid races if vhold() blocks in the future.
519 *
520 * Both ncp and par must be referenced and locked.
521 *
522 * NOTE: The hash table spinlock is likely held during this call, we
523 * can't do anything fancy.
f63911bf 524 *
2247fe02 525 * MPSAFE
690a3127
MD
526 */
527static void
2247fe02
MD
528_cache_link_parent(struct namecache *ncp, struct namecache *par,
529 struct nchash_head *nchpp)
690a3127
MD
530{
531 KKASSERT(ncp->nc_parent == NULL);
532 ncp->nc_parent = par;
2247fe02 533 ncp->nc_head = nchpp;
aabd5ce8
MD
534
535 /*
536 * Set inheritance flags. Note that the parent flags may be
537 * stale due to getattr potentially not having been run yet
538 * (it gets run during nlookup()'s).
539 */
540 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
541 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
542 ncp->nc_flag |= NCF_SF_PNOCACHE;
543 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
544 ncp->nc_flag |= NCF_UF_PCACHE;
545
2247fe02
MD
546 LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
547
690a3127
MD
548 if (TAILQ_EMPTY(&par->nc_list)) {
549 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
21739618
MD
550 /*
551 * Any vp associated with an ncp which has children must
55361147 552 * be held to prevent it from being recycled.
21739618 553 */
690a3127 554 if (par->nc_vp)
2247fe02 555 vhold(par->nc_vp);
690a3127
MD
556 } else {
557 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
558 }
559}
560
561/*
2247fe02
MD
562 * Remove the parent and hash associations from a namecache structure.
563 * If this is the last child of the parent the cache_drop(par) will
564 * attempt to recursively zap the parent.
565 *
566 * ncp must be locked. This routine will acquire a temporary lock on
567 * the parent as well as the appropriate hash chain.
f63911bf 568 *
2247fe02 569 * MPSAFE
690a3127
MD
570 */
571static void
f63911bf 572_cache_unlink_parent(struct namecache *ncp)
690a3127
MD
573{
574 struct namecache *par;
f63911bf 575 struct vnode *dropvp;
690a3127
MD
576
577 if ((par = ncp->nc_parent) != NULL) {
2247fe02 578 KKASSERT(ncp->nc_parent == par);
f63911bf 579 _cache_hold(par);
2247fe02 580 _cache_lock(par);
287a8577 581 spin_lock(&ncp->nc_head->spin);
2247fe02 582 LIST_REMOVE(ncp, nc_hash);
690a3127 583 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
f63911bf 584 dropvp = NULL;
690a3127 585 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
f63911bf 586 dropvp = par->nc_vp;
287a8577 587 spin_unlock(&ncp->nc_head->spin);
2247fe02
MD
588 ncp->nc_parent = NULL;
589 ncp->nc_head = NULL;
590 _cache_unlock(par);
28623bf9 591 _cache_drop(par);
f63911bf
MD
592
593 /*
594 * We can only safely vdrop with no spinlocks held.
595 */
596 if (dropvp)
597 vdrop(dropvp);
690a3127
MD
598 }
599}
600
601/*
fad57d0e
MD
602 * Allocate a new namecache structure. Most of the code does not require
603 * zero-termination of the string but it makes vop_compat_ncreate() easier.
2247fe02
MD
604 *
605 * MPSAFE
690a3127
MD
606 */
607static struct namecache *
524c845c 608cache_alloc(int nlen)
690a3127
MD
609{
610 struct namecache *ncp;
611
efda3bd0 612 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
524c845c 613 if (nlen)
efda3bd0 614 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
524c845c 615 ncp->nc_nlen = nlen;
690a3127
MD
616 ncp->nc_flag = NCF_UNRESOLVED;
617 ncp->nc_error = ENOTCONN; /* needs to be resolved */
8c361dda 618 ncp->nc_refs = 1;
e4bff3c8 619
690a3127 620 TAILQ_INIT(&ncp->nc_list);
28623bf9 621 _cache_lock(ncp);
690a3127
MD
622 return(ncp);
623}
624
f63911bf
MD
625/*
626 * Can only be called for the case where the ncp has never been
627 * associated with anything (so no spinlocks are needed).
2247fe02
MD
628 *
629 * MPSAFE
f63911bf 630 */
8c361dda 631static void
28623bf9 632_cache_free(struct namecache *ncp)
8c361dda
MD
633{
634 KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
635 if (ncp->nc_name)
efda3bd0
MD
636 kfree(ncp->nc_name, M_VFSCACHE);
637 kfree(ncp, M_VFSCACHE);
8c361dda 638}
690a3127 639
2247fe02
MD
640/*
641 * MPSAFE
642 */
28623bf9
MD
643void
644cache_zero(struct nchandle *nch)
645{
646 nch->ncp = NULL;
647 nch->mount = NULL;
648}
649
690a3127
MD
650/*
651 * Ref and deref a namecache structure.
5b287bba 652 *
2247fe02
MD
653 * The caller must specify a stable ncp pointer, typically meaning the
654 * ncp is already referenced but this can also occur indirectly through
655 * e.g. holding a lock on a direct child.
656 *
657 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
658 * use read spinlocks here.
61f96b6f
MD
659 *
660 * MPSAFE if nch is
690a3127 661 */
28623bf9
MD
662struct nchandle *
663cache_hold(struct nchandle *nch)
bc0c094e 664{
28623bf9 665 _cache_hold(nch->ncp);
61f96b6f 666 atomic_add_int(&nch->mount->mnt_refs, 1);
28623bf9 667 return(nch);
bc0c094e
MD
668}
669
61f96b6f
MD
670/*
671 * Create a copy of a namecache handle for an already-referenced
672 * entry.
673 *
674 * MPSAFE if nch is
675 */
bc0c094e 676void
28623bf9 677cache_copy(struct nchandle *nch, struct nchandle *target)
bc0c094e 678{
28623bf9 679 *target = *nch;
cf37bc1a
MD
680 if (target->ncp)
681 _cache_hold(target->ncp);
61f96b6f 682 atomic_add_int(&nch->mount->mnt_refs, 1);
28623bf9
MD
683}
684
61f96b6f
MD
685/*
686 * MPSAFE if nch is
687 */
28623bf9
MD
688void
689cache_changemount(struct nchandle *nch, struct mount *mp)
690{
61f96b6f 691 atomic_add_int(&nch->mount->mnt_refs, -1);
28623bf9 692 nch->mount = mp;
61f96b6f 693 atomic_add_int(&nch->mount->mnt_refs, 1);
28623bf9
MD
694}
695
2247fe02
MD
696/*
697 * MPSAFE
698 */
28623bf9
MD
699void
700cache_drop(struct nchandle *nch)
701{
61f96b6f 702 atomic_add_int(&nch->mount->mnt_refs, -1);
28623bf9
MD
703 _cache_drop(nch->ncp);
704 nch->ncp = NULL;
705 nch->mount = NULL;
bc0c094e
MD
706}
707
2247fe02
MD
708/*
709 * MPSAFE
710 */
28623bf9
MD
711void
712cache_lock(struct nchandle *nch)
713{
714 _cache_lock(nch->ncp);
715}
716
2247fe02
MD
717/*
718 * Relock nch1 given an unlocked nch1 and a locked nch2. The caller
719 * is responsible for checking both for validity on return as they
720 * may have become invalid.
721 *
722 * We have to deal with potential deadlocks here, just ping pong
723 * the lock until we get it (we will always block somewhere when
724 * looping so this is not cpu-intensive).
725 *
726 * which = 0 nch1 not locked, nch2 is locked
727 * which = 1 nch1 is locked, nch2 is not locked
728 */
729void
730cache_relock(struct nchandle *nch1, struct ucred *cred1,
731 struct nchandle *nch2, struct ucred *cred2)
732{
733 int which;
734
735 which = 0;
736
737 for (;;) {
738 if (which == 0) {
739 if (cache_lock_nonblock(nch1) == 0) {
740 cache_resolve(nch1, cred1);
741 break;
742 }
743 cache_unlock(nch2);
744 cache_lock(nch1);
745 cache_resolve(nch1, cred1);
746 which = 1;
747 } else {
748 if (cache_lock_nonblock(nch2) == 0) {
749 cache_resolve(nch2, cred2);
750 break;
751 }
752 cache_unlock(nch1);
753 cache_lock(nch2);
754 cache_resolve(nch2, cred2);
755 which = 0;
756 }
757 }
758}
759
760/*
761 * MPSAFE
762 */
28623bf9
MD
763int
764cache_lock_nonblock(struct nchandle *nch)
765{
766 return(_cache_lock_nonblock(nch->ncp));
767}
768
14c92d03 769
2247fe02
MD
770/*
771 * MPSAFE
772 */
28623bf9
MD
773void
774cache_unlock(struct nchandle *nch)
775{
776 _cache_unlock(nch->ncp);
777}
778
14c92d03 779/*
690a3127 780 * ref-and-lock, unlock-and-deref functions.
9b1b3591
MD
781 *
782 * This function is primarily used by nlookup. Even though cache_lock
783 * holds the vnode, it is possible that the vnode may have already
f63911bf
MD
784 * initiated a recyclement.
785 *
786 * We want cache_get() to return a definitively usable vnode or a
787 * definitively unresolved ncp.
2247fe02
MD
788 *
789 * MPSAFE
14c92d03 790 */
28623bf9 791static
21739618 792struct namecache *
28623bf9 793_cache_get(struct namecache *ncp)
690a3127
MD
794{
795 _cache_hold(ncp);
28623bf9 796 _cache_lock(ncp);
9b1b3591 797 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
28623bf9 798 _cache_setunresolved(ncp);
21739618 799 return(ncp);
690a3127
MD
800}
801
28623bf9 802/*
2247fe02 803 * This is a special form of _cache_lock() which only succeeds if
f63911bf
MD
804 * it can get a pristine, non-recursive lock. The caller must have
805 * already ref'd the ncp.
806 *
807 * On success the ncp will be locked, on failure it will not. The
808 * ref count does not change either way.
809 *
2247fe02 810 * We want _cache_lock_special() (on success) to return a definitively
f63911bf 811 * usable vnode or a definitively unresolved ncp.
2247fe02
MD
812 *
813 * MPSAFE
f63911bf
MD
814 */
815static int
2247fe02 816_cache_lock_special(struct namecache *ncp)
f63911bf
MD
817{
818 if (_cache_lock_nonblock(ncp) == 0) {
2247fe02 819 if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
f63911bf
MD
820 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
821 _cache_setunresolved(ncp);
822 return(0);
823 }
824 _cache_unlock(ncp);
825 }
826 return(EWOULDBLOCK);
827}
828
829
830/*
831 * NOTE: The same nchandle can be passed for both arguments.
2247fe02
MD
832 *
833 * MPSAFE
28623bf9
MD
834 */
835void
836cache_get(struct nchandle *nch, struct nchandle *target)
837{
f63911bf 838 KKASSERT(nch->ncp->nc_refs > 0);
28623bf9
MD
839 target->mount = nch->mount;
840 target->ncp = _cache_get(nch->ncp);
61f96b6f 841 atomic_add_int(&target->mount->mnt_refs, 1);
28623bf9
MD
842}
843
2247fe02
MD
844/*
845 * MPSAFE
846 */
28623bf9 847static __inline
690a3127 848void
28623bf9 849_cache_put(struct namecache *ncp)
14c92d03 850{
28623bf9 851 _cache_unlock(ncp);
14c92d03
MD
852 _cache_drop(ncp);
853}
854
2247fe02
MD
855/*
856 * MPSAFE
857 */
28623bf9
MD
858void
859cache_put(struct nchandle *nch)
860{
61f96b6f 861 atomic_add_int(&nch->mount->mnt_refs, -1);
28623bf9
MD
862 _cache_put(nch->ncp);
863 nch->ncp = NULL;
864 nch->mount = NULL;
865}
866
690a3127 867/*
690a3127
MD
868 * Resolve an unresolved ncp by associating a vnode with it. If the
869 * vnode is NULL, a negative cache entry is created.
870 *
871 * The ncp should be locked on entry and will remain locked on return.
2247fe02
MD
872 *
873 * MPSAFE
690a3127 874 */
28623bf9 875static
690a3127 876void
4b5bbb78 877_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
ce6da7e4 878{
690a3127 879 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2247fe02 880
ce6da7e4 881 if (vp != NULL) {
21739618
MD
882 /*
883 * Any vp associated with an ncp which has children must
55361147 884 * be held. Any vp associated with a locked ncp must be held.
21739618
MD
885 */
886 if (!TAILQ_EMPTY(&ncp->nc_list))
887 vhold(vp);
287a8577 888 spin_lock(&vp->v_spinlock);
f63911bf 889 ncp->nc_vp = vp;
ce6da7e4 890 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
287a8577 891 spin_unlock(&vp->v_spinlock);
55361147
MD
892 if (ncp->nc_exlocks)
893 vhold(vp);
21739618
MD
894
895 /*
3c37c940 896 * Set auxiliary flags
21739618 897 */
690a3127
MD
898 switch(vp->v_type) {
899 case VDIR:
21739618
MD
900 ncp->nc_flag |= NCF_ISDIR;
901 break;
690a3127 902 case VLNK:
21739618
MD
903 ncp->nc_flag |= NCF_ISSYMLINK;
904 /* XXX cache the contents of the symlink */
905 break;
690a3127 906 default:
21739618 907 break;
690a3127 908 }
f63911bf 909 atomic_add_int(&numcache, 1);
21739618 910 ncp->nc_error = 0;
ce6da7e4 911 } else {
4b5bbb78
MD
912 /*
913 * When creating a negative cache hit we set the
914 * namecache_gen. A later resolve will clean out the
915 * negative cache hit if the mount point's namecache_gen
916 * has changed. Used by devfs, could also be used by
917 * other remote FSs.
918 */
f63911bf 919 ncp->nc_vp = NULL;
287a8577 920 spin_lock(&ncspin);
1345c2b6 921 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
ce6da7e4 922 ++numneg;
287a8577 923 spin_unlock(&ncspin);
21739618 924 ncp->nc_error = ENOENT;
4b5bbb78
MD
925 if (mp)
926 ncp->nc_namecache_gen = mp->mnt_namecache_gen;
ce6da7e4 927 }
65870584 928 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
ce6da7e4
MD
929}
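/*
 * Usage note (illustrative): a filesystem's resolve code typically calls
 * cache_setvp(nch, vp) on success or cache_setvp(nch, NULL) to record a
 * negative hit; either way the entry leaves the UNRESOLVED state.
 */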
930
2247fe02
MD
931/*
932 * MPSAFE
933 */
fad57d0e 934void
28623bf9 935cache_setvp(struct nchandle *nch, struct vnode *vp)
fad57d0e 936{
4b5bbb78 937 _cache_setvp(nch->mount, nch->ncp, vp);
28623bf9
MD
938}
939
2247fe02
MD
940/*
941 * MPSAFE
942 */
28623bf9
MD
943void
944cache_settimeout(struct nchandle *nch, int nticks)
945{
946 struct namecache *ncp = nch->ncp;
947
fad57d0e
MD
948 if ((ncp->nc_timeout = ticks + nticks) == 0)
949 ncp->nc_timeout = 1;
950}
951
690a3127
MD
952/*
953 * Disassociate the vnode or negative-cache association and mark a
954 * namecache entry as unresolved again. Note that the ncp is still
955 * left in the hash table and still linked to its parent.
956 *
67773eb3
MD
957 * The ncp should be locked and refd on entry and will remain locked and refd
958 * on return.
8c361dda
MD
959 *
960 * This routine is normally never called on a directory containing children.
961 * However, NFS often does just that in its rename() code as a cop-out to
962 * avoid complex namespace operations. This disconnects a directory vnode
963 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
964 * sync.
2247fe02
MD
965 *
966 * MPSAFE
690a3127 967 */
28623bf9 968static
690a3127 969void
28623bf9 970_cache_setunresolved(struct namecache *ncp)
14c92d03 971{
690a3127 972 struct vnode *vp;
14c92d03 973
690a3127
MD
974 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
975 ncp->nc_flag |= NCF_UNRESOLVED;
fad57d0e 976 ncp->nc_timeout = 0;
690a3127 977 ncp->nc_error = ENOTCONN;
690a3127 978 if ((vp = ncp->nc_vp) != NULL) {
f63911bf 979 atomic_add_int(&numcache, -1);
287a8577 980 spin_lock(&vp->v_spinlock);
fad57d0e 981 ncp->nc_vp = NULL;
690a3127 982 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
287a8577 983 spin_unlock(&vp->v_spinlock);
55361147
MD
984
985 /*
986 * Any vp associated with an ncp with children is
987 * held by that ncp. Any vp associated with a locked
988 * ncp is held by that ncp. These conditions must be
989 * undone when the vp is cleared out from the ncp.
990 */
690a3127
MD
991 if (!TAILQ_EMPTY(&ncp->nc_list))
992 vdrop(vp);
55361147
MD
993 if (ncp->nc_exlocks)
994 vdrop(vp);
690a3127 995 } else {
287a8577 996 spin_lock(&ncspin);
690a3127
MD
997 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
998 --numneg;
287a8577 999 spin_unlock(&ncspin);
690a3127 1000 }
d98152a8 1001 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
8e005a45
MD
1002 }
1003}
8c361dda 1004
4b5bbb78
MD
1005/*
1006 * The cache_nresolve() code calls this function to automatically
1007 * set a resolved cache element to unresolved if it has timed out
1008 * or if it is a negative cache hit and the mount point namecache_gen
1009 * has changed.
2247fe02
MD
1010 *
1011 * MPSAFE
4b5bbb78
MD
1012 */
1013static __inline void
1014_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
1015{
1016 /*
1017 * Already in an unresolved state, nothing to do.
1018 */
1019 if (ncp->nc_flag & NCF_UNRESOLVED)
1020 return;
1021
1022 /*
1023 * Try to zap entries that have timed out. We have
1024 * to be careful here because locked leafs may depend
1025 * on the vnode remaining intact in a parent, so only
1026 * do this under very specific conditions.
1027 */
1028 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
1029 TAILQ_EMPTY(&ncp->nc_list)) {
1030 _cache_setunresolved(ncp);
1031 return;
1032 }
1033
1034 /*
1035 * If a resolved negative cache hit is invalid due to
1036 * the mount's namecache generation being bumped, zap it.
1037 */
1038 if (ncp->nc_vp == NULL &&
1039 ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
1040 _cache_setunresolved(ncp);
1041 return;
1042 }
1043}
1044
2247fe02
MD
1045/*
1046 * MPSAFE
1047 */
1d505369 1048void
28623bf9 1049cache_setunresolved(struct nchandle *nch)
1d505369 1050{
28623bf9 1051 _cache_setunresolved(nch->ncp);
1d505369
MD
1052}
1053
1054/*
28623bf9
MD
1055 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
1056 * looking for matches. This flag tells the lookup code when it must
1057 * check for a mount linkage and also prevents the directories in question
1058 * from being deleted or renamed.
2247fe02
MD
1059 *
1060 * MPSAFE
1d505369 1061 */
28623bf9
MD
1062static
1063int
1064cache_clrmountpt_callback(struct mount *mp, void *data)
1065{
1066 struct nchandle *nch = data;
1067
1068 if (mp->mnt_ncmounton.ncp == nch->ncp)
1069 return(1);
1070 if (mp->mnt_ncmountpt.ncp == nch->ncp)
1071 return(1);
1072 return(0);
1073}
1074
2247fe02
MD
1075/*
1076 * MPSAFE
1077 */
1d505369 1078void
28623bf9 1079cache_clrmountpt(struct nchandle *nch)
1d505369 1080{
28623bf9
MD
1081 int count;
1082
1083 count = mountlist_scan(cache_clrmountpt_callback, nch,
1084 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1085 if (count == 0)
1086 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1d505369
MD
1087}
1088
1089/*
e09206ba
MD
1090 * Invalidate portions of the namecache topology given a starting entry.
1091 * The passed ncp is set to an unresolved state and:
8e005a45 1092 *
2247fe02
MD
1093 * The passed ncp must be referenced and locked. The routine may unlock
1094 * and relock ncp several times, and will recheck the children and loop
1095 * to catch races. When done the passed ncp will be returned with the
1096 * reference and lock intact.
e09206ba
MD
1097 *
1098 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
1099 * that the physical underlying nodes have been
1100 * destroyed... as in deleted. For example, when
1101 * a directory is removed. This will cause record
1102 * lookups on the name to no longer be able to find
1103 * the record and tells the resolver to return failure
1104 * rather than trying to resolve through the parent.
1105 *
1106 * The topology itself, including ncp->nc_name,
1107 * remains intact.
1108 *
1109 * This only applies to the passed ncp, if CINV_CHILDREN
1110 * is specified the children are not flagged.
1111 *
1112 * CINV_CHILDREN - Set all children (recursively) to an unresolved
1113 * state as well.
1114 *
1115 * Note that this will also have the side effect of
1116 * cleaning out any unreferenced nodes in the topology
1117 * from the leaves up as the recursion backs out.
1118 *
2247fe02
MD
1119 * Note that the topology for any referenced nodes remains intact, but
1120 * the nodes will be marked as having been destroyed and will be set
1121 * to an unresolved state.
25cb3304
MD
1122 *
1123 * It is possible for cache_inval() to race a cache_resolve(), meaning that
1124 * the namecache entry may not actually be invalidated on return if it was
1125 * revalidated while recursing down into its children. This code guarantees
1126 * that the node(s) will go through an invalidation cycle, but does not
1127 * guarantee that they will remain in an invalidated state.
1128 *
1129 * Returns non-zero if a revalidation was detected during the invalidation
1130 * recursion, zero otherwise. Note that since only the original ncp is
1131 * locked the revalidation ultimately can only indicate that the original ncp
1132 * *MIGHT* not have been reresolved.
bf40a153
MD
1133 *
1134 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1135 * have to avoid blowing out the kernel stack. We do this by saving the
1136 * deep namecache node and aborting the recursion, then re-recursing at that
1137 * node using a depth-first algorithm in order to allow multiple deep
1138 * recursions to chain through each other, then we restart the invalidation
1139 * from scratch.
2247fe02
MD
1140 *
1141 * MPSAFE
8e005a45 1142 */
bf40a153
MD
1143
1144struct cinvtrack {
1145 struct namecache *resume_ncp;
1146 int depth;
1147};
1148
28623bf9 1149static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
bf40a153 1150
28623bf9 1151static
25cb3304 1152int
28623bf9 1153_cache_inval(struct namecache *ncp, int flags)
8e005a45 1154{
bf40a153
MD
1155 struct cinvtrack track;
1156 struct namecache *ncp2;
1157 int r;
1158
1159 track.depth = 0;
1160 track.resume_ncp = NULL;
1161
1162 for (;;) {
28623bf9 1163 r = _cache_inval_internal(ncp, flags, &track);
bf40a153
MD
1164 if (track.resume_ncp == NULL)
1165 break;
6ea70f76 1166 kprintf("Warning: deep namecache recursion at %s\n",
bf40a153 1167 ncp->nc_name);
28623bf9 1168 _cache_unlock(ncp);
bf40a153
MD
1169 while ((ncp2 = track.resume_ncp) != NULL) {
1170 track.resume_ncp = NULL;
28623bf9
MD
1171 _cache_lock(ncp2);
1172 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
bf40a153 1173 &track);
28623bf9 1174 _cache_put(ncp2);
bf40a153 1175 }
28623bf9 1176 _cache_lock(ncp);
bf40a153
MD
1177 }
1178 return(r);
1179}
1180
28623bf9
MD
1181int
1182cache_inval(struct nchandle *nch, int flags)
1183{
1184 return(_cache_inval(nch->ncp, flags));
1185}
1186
2247fe02
MD
1187/*
1188 * Helper for _cache_inval(). The passed ncp is refd and locked and
1189 * remains that way on return, but may be unlocked/relocked multiple
1190 * times by the routine.
1191 */
bf40a153 1192static int
28623bf9 1193_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
bf40a153 1194{
8e005a45 1195 struct namecache *kid;
b8997912 1196 struct namecache *nextkid;
25cb3304 1197 int rcnt = 0;
8e005a45 1198
e09206ba 1199 KKASSERT(ncp->nc_exlocks);
25cb3304 1200
28623bf9 1201 _cache_setunresolved(ncp);
e09206ba
MD
1202 if (flags & CINV_DESTROY)
1203 ncp->nc_flag |= NCF_DESTROYED;
e09206ba
MD
1204 if ((flags & CINV_CHILDREN) &&
1205 (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
1206 ) {
f63911bf 1207 _cache_hold(kid);
bf40a153
MD
1208 if (++track->depth > MAX_RECURSION_DEPTH) {
1209 track->resume_ncp = ncp;
28623bf9 1210 _cache_hold(ncp);
bf40a153
MD
1211 ++rcnt;
1212 }
28623bf9 1213 _cache_unlock(ncp);
b8997912 1214 while (kid) {
bf40a153 1215 if (track->resume_ncp) {
28623bf9 1216 _cache_drop(kid);
bf40a153
MD
1217 break;
1218 }
b8997912 1219 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
28623bf9 1220 _cache_hold(nextkid);
e09206ba
MD
1221 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
1222 TAILQ_FIRST(&kid->nc_list)
b8997912 1223 ) {
28623bf9
MD
1224 _cache_lock(kid);
1225 rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
1226 _cache_unlock(kid);
b8997912 1227 }
28623bf9 1228 _cache_drop(kid);
fad57d0e 1229 kid = nextkid;
8e005a45 1230 }
bf40a153 1231 --track->depth;
28623bf9 1232 _cache_lock(ncp);
8e005a45 1233 }
25cb3304
MD
1234
1235 /*
1236 * Someone could have gotten in there while ncp was unlocked,
1237 * retry if so.
1238 */
1239 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1240 ++rcnt;
1241 return (rcnt);
8e005a45
MD
1242}
1243
e09206ba 1244/*
25cb3304
MD
1245 * Invalidate a vnode's namecache associations. To avoid races against
1246 * the resolver we do not invalidate a node which we previously invalidated
1247 * but which was then re-resolved while we were in the invalidation loop.
1248 *
1249 * Returns non-zero if any namecache entries remain after the invalidation
1250 * loop completed.
2aefb2c5 1251 *
f63911bf
MD
1252 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1253 * be ripped out of the topology while held, the vnode's v_namecache
1254 * list has no such restriction. NCP's can be ripped out of the list
1255 * at virtually any time if not locked, even if held.
1256 *
1257 * In addition, the v_namecache list itself must be locked via
1258 * the vnode's spinlock.
2247fe02
MD
1259 *
1260 * MPSAFE
e09206ba 1261 */
25cb3304 1262int
6b008938 1263cache_inval_vp(struct vnode *vp, int flags)
8e005a45
MD
1264{
1265 struct namecache *ncp;
25cb3304
MD
1266 struct namecache *next;
1267
2aefb2c5 1268restart:
287a8577 1269 spin_lock(&vp->v_spinlock);
25cb3304
MD
1270 ncp = TAILQ_FIRST(&vp->v_namecache);
1271 if (ncp)
28623bf9 1272 _cache_hold(ncp);
25cb3304 1273 while (ncp) {
f63911bf 1274 /* loop entered with ncp held and vp spin-locked */
2aefb2c5 1275 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
28623bf9 1276 _cache_hold(next);
287a8577 1277 spin_unlock(&vp->v_spinlock);
28623bf9 1278 _cache_lock(ncp);
2aefb2c5 1279 if (ncp->nc_vp != vp) {
6ea70f76 1280 kprintf("Warning: cache_inval_vp: race-A detected on "
2aefb2c5 1281 "%s\n", ncp->nc_name);
28623bf9 1282 _cache_put(ncp);
69313361 1283 if (next)
28623bf9 1284 _cache_drop(next);
2aefb2c5
MD
1285 goto restart;
1286 }
28623bf9
MD
1287 _cache_inval(ncp, flags);
1288 _cache_put(ncp); /* also releases reference */
25cb3304 1289 ncp = next;
287a8577 1290 spin_lock(&vp->v_spinlock);
2aefb2c5 1291 if (ncp && ncp->nc_vp != vp) {
287a8577 1292 spin_unlock(&vp->v_spinlock);
6ea70f76 1293 kprintf("Warning: cache_inval_vp: race-B detected on "
2aefb2c5 1294 "%s\n", ncp->nc_name);
28623bf9 1295 _cache_drop(ncp);
2aefb2c5
MD
1296 goto restart;
1297 }
690a3127 1298 }
287a8577 1299 spin_unlock(&vp->v_spinlock);
25cb3304 1300 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
5c6c3cac
MD
1301}
1302
1303/*
1304 * This routine is used instead of the normal cache_inval_vp() when we
1305 * are trying to recycle otherwise good vnodes.
1306 *
1307 * Return 0 on success, non-zero if not all namecache records could be
1308 * disassociated from the vnode (for various reasons).
2247fe02
MD
1309 *
1310 * MPSAFE
5c6c3cac
MD
1311 */
1312int
1313cache_inval_vp_nonblock(struct vnode *vp)
1314{
1315 struct namecache *ncp;
1316 struct namecache *next;
1317
287a8577 1318 spin_lock(&vp->v_spinlock);
5c6c3cac
MD
1319 ncp = TAILQ_FIRST(&vp->v_namecache);
1320 if (ncp)
1321 _cache_hold(ncp);
1322 while (ncp) {
1323 /* loop entered with ncp held */
1324 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1325 _cache_hold(next);
287a8577 1326 spin_unlock(&vp->v_spinlock);
5c6c3cac
MD
1327 if (_cache_lock_nonblock(ncp)) {
1328 _cache_drop(ncp);
1329 if (next)
1330 _cache_drop(next);
2247fe02 1331 goto done;
5c6c3cac
MD
1332 }
1333 if (ncp->nc_vp != vp) {
1334 kprintf("Warning: cache_inval_vp: race-A detected on "
1335 "%s\n", ncp->nc_name);
1336 _cache_put(ncp);
1337 if (next)
1338 _cache_drop(next);
2247fe02 1339 goto done;
5c6c3cac
MD
1340 }
1341 _cache_inval(ncp, 0);
1342 _cache_put(ncp); /* also releases reference */
1343 ncp = next;
287a8577 1344 spin_lock(&vp->v_spinlock);
5c6c3cac 1345 if (ncp && ncp->nc_vp != vp) {
287a8577 1346 spin_unlock(&vp->v_spinlock);
5c6c3cac
MD
1347 kprintf("Warning: cache_inval_vp: race-B detected on "
1348 "%s\n", ncp->nc_name);
1349 _cache_drop(ncp);
2247fe02 1350 goto done;
5c6c3cac
MD
1351 }
1352 }
287a8577 1353 spin_unlock(&vp->v_spinlock);
2247fe02 1354done:
5c6c3cac 1355 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
14c92d03 1356}
14c92d03 1357
984263bc 1358/*
fad57d0e 1359 * The source ncp has been renamed to the target ncp. Both fncp and tncp
227cf16d
MD
1360 * must be locked. The target ncp is destroyed (as a normal rename-over
1361 * would destroy the target file or directory).
fad57d0e 1362 *
227cf16d
MD
1363 * Because there may be references to the source ncp we cannot copy its
1364 * contents to the target. Instead the source ncp is relinked as the target
1365 * and the target ncp is removed from the namecache topology.
2247fe02
MD
1366 *
1367 * MPSAFE
fad57d0e
MD
1368 */
1369void
28623bf9 1370cache_rename(struct nchandle *fnch, struct nchandle *tnch)
fad57d0e 1371{
28623bf9
MD
1372 struct namecache *fncp = fnch->ncp;
1373 struct namecache *tncp = tnch->ncp;
2247fe02
MD
1374 struct namecache *tncp_par;
1375 struct nchash_head *nchpp;
1376 u_int32_t hash;
227cf16d 1377 char *oname;
fad57d0e 1378
2247fe02
MD
1379 /*
1380 * Rename fncp (unlink)
1381 */
f63911bf 1382 _cache_unlink_parent(fncp);
227cf16d
MD
1383 oname = fncp->nc_name;
1384 fncp->nc_name = tncp->nc_name;
1385 fncp->nc_nlen = tncp->nc_nlen;
2247fe02
MD
1386 tncp_par = tncp->nc_parent;
1387 _cache_hold(tncp_par);
1388 _cache_lock(tncp_par);
1389
1390 /*
1391 * Rename fncp (relink)
1392 */
1393 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
1394 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
1395 nchpp = NCHHASH(hash);
1396
287a8577 1397 spin_lock(&nchpp->spin);
2247fe02 1398 _cache_link_parent(fncp, tncp_par, nchpp);
287a8577 1399 spin_unlock(&nchpp->spin);
2247fe02
MD
1400
1401 _cache_put(tncp_par);
1402
1403 /*
1404 * Get rid of the overwritten tncp (unlink)
1405 */
1406 _cache_setunresolved(tncp);
1407 _cache_unlink_parent(tncp);
227cf16d
MD
1408 tncp->nc_name = NULL;
1409 tncp->nc_nlen = 0;
f63911bf 1410
227cf16d
MD
1411 if (oname)
1412 kfree(oname, M_VFSCACHE);
fad57d0e
MD
1413}
1414
1415/*
21739618 1416 * vget the vnode associated with the namecache entry. Resolve the namecache
2247fe02 1417 * entry if necessary. The passed ncp must be referenced and locked.
21739618
MD
1418 *
1419 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
1420 * (depending on the passed lk_type) will be returned in *vpp with an error
1421 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
1422 * most typical error is ENOENT, meaning that the ncp represents a negative
1423 * cache hit and there is no vnode to retrieve, but other errors can occur
1424 * too.
1425 *
2247fe02
MD
1426 * The vget() can race a reclaim. If this occurs we re-resolve the
1427 * namecache entry.
1428 *
1429 * There are numerous places in the kernel where vget() is called on a
1430 * vnode while one or more of its namecache entries is locked. Releasing
1431 * a vnode never deadlocks against locked namecache entries (the vnode
1432 * will not get recycled while referenced ncp's exist). This means we
1433 * can safely acquire the vnode. In fact, we MUST NOT release the ncp
1434 * lock when acquiring the vp lock or we might cause a deadlock.
1435 *
1436 * MPSAFE
21739618
MD
1437 */
1438int
28623bf9 1439cache_vget(struct nchandle *nch, struct ucred *cred,
21739618
MD
1440 int lk_type, struct vnode **vpp)
1441{
28623bf9 1442 struct namecache *ncp;
21739618
MD
1443 struct vnode *vp;
1444 int error;
1445
28623bf9 1446 ncp = nch->ncp;
2247fe02 1447 KKASSERT(ncp->nc_locktd == curthread);
21739618
MD
1448again:
1449 vp = NULL;
2247fe02 1450 if (ncp->nc_flag & NCF_UNRESOLVED)
28623bf9 1451 error = cache_resolve(nch, cred);
2247fe02 1452 else
21739618 1453 error = 0;
2247fe02 1454
21739618 1455 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
87de5057 1456 error = vget(vp, lk_type);
21739618 1457 if (error) {
2247fe02
MD
1458 /*
1459 * VRECLAIM race
1460 */
1461 if (error == ENOENT) {
1462 kprintf("Warning: vnode reclaim race detected "
1463 "in cache_vget on %p (%s)\n",
1464 vp, ncp->nc_name);
1465 _cache_setunresolved(ncp);
21739618 1466 goto again;
2247fe02
MD
1467 }
1468
1469 /*
1470 * Not a reclaim race, some other error.
1471 */
1472 KKASSERT(ncp->nc_vp == vp);
21739618 1473 vp = NULL;
2247fe02
MD
1474 } else {
1475 KKASSERT(ncp->nc_vp == vp);
1476 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
21739618
MD
1477 }
1478 }
1479 if (error == 0 && vp == NULL)
1480 error = ENOENT;
1481 *vpp = vp;
1482 return(error);
1483}
1484
1485int
28623bf9 1486cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
21739618 1487{
28623bf9 1488 struct namecache *ncp;
21739618
MD
1489 struct vnode *vp;
1490 int error;
1491
28623bf9 1492 ncp = nch->ncp;
2247fe02 1493 KKASSERT(ncp->nc_locktd == curthread);
21739618
MD
1494again:
1495 vp = NULL;
2247fe02 1496 if (ncp->nc_flag & NCF_UNRESOLVED)
28623bf9 1497 error = cache_resolve(nch, cred);
2247fe02 1498 else
21739618 1499 error = 0;
2247fe02 1500
21739618 1501 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
2247fe02
MD
1502 error = vget(vp, LK_SHARED);
1503 if (error) {
1504 /*
1505 * VRECLAIM race
1506 */
3c37c940 1507 if (error == ENOENT) {
2247fe02
MD
1508 kprintf("Warning: vnode reclaim race detected "
1509 "in cache_vget on %p (%s)\n",
1510 vp, ncp->nc_name);
3c37c940 1511 _cache_setunresolved(ncp);
3c37c940
MD
1512 goto again;
1513 }
2247fe02
MD
1514
1515 /*
1516 * Not a reclaim race, some other error.
1517 */
1518 KKASSERT(ncp->nc_vp == vp);
1519 vp = NULL;
3c37c940 1520 } else {
2247fe02
MD
1521 KKASSERT(ncp->nc_vp == vp);
1522 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
3c37c940
MD
1523 /* caller does not want a lock */
1524 vn_unlock(vp);
21739618
MD
1525 }
1526 }
1527 if (error == 0 && vp == NULL)
1528 error = ENOENT;
1529 *vpp = vp;
1530 return(error);
1531}
1532
dc1be39c 1533/*
c0c70b27 1534 * Return a referenced vnode representing the parent directory of
f63911bf
MD
1535 * ncp.
1536 *
1537 * Because the caller has locked the ncp it should not be possible for
1538 * the parent ncp to go away. However, the parent can unresolve its
1539 * dvp at any time so we must be able to acquire a lock on the parent
1540 * to safely access nc_vp.
5312fa43 1541 *
f63911bf
MD
1542 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
1543 * so use vhold()/vdrop() while holding the lock to prevent dvp from
1544 * getting destroyed.
2247fe02
MD
1545 *
1546 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
1547 * lock on the ncp in question..
c0c70b27 1548 */
5312fa43 1549static struct vnode *
c0c70b27
MD
1550cache_dvpref(struct namecache *ncp)
1551{
5312fa43 1552 struct namecache *par;
c0c70b27 1553 struct vnode *dvp;
c0c70b27 1554
5312fa43
MD
1555 dvp = NULL;
1556 if ((par = ncp->nc_parent) != NULL) {
f63911bf 1557 _cache_hold(par);
2247fe02
MD
1558 _cache_lock(par);
1559 if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
1560 if ((dvp = par->nc_vp) != NULL)
1561 vhold(dvp);
1562 }
1563 _cache_unlock(par);
1564 if (dvp) {
1565 if (vget(dvp, LK_SHARED) == 0) {
1566 vn_unlock(dvp);
1567 vdrop(dvp);
1568 /* return refd, unlocked dvp */
1569 } else {
1570 vdrop(dvp);
1571 dvp = NULL;
5312fa43
MD
1572 }
1573 }
f63911bf 1574 _cache_drop(par);
5312fa43
MD
1575 }
1576 return(dvp);
c0c70b27
MD
1577}
1578
1579/*
fad57d0e
MD
1580 * Convert a directory vnode to a namecache record without any other
1581 * knowledge of the topology. This ONLY works with directory vnodes and
1582 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
1583 * returned ncp (if not NULL) will be held and unlocked.
1584 *
1585 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
1586 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
1587 * for dvp. This will fail only if the directory has been deleted out from
1588 * under the caller.
1589 *
1590 * Callers must always check for a NULL return no matter the value of 'makeit'.
a0d57516
MD
1591 *
1592 * To avoid underflowing the kernel stack each recursive call increments
1593 * the makeit variable.
fad57d0e
MD
1594 */
1595
28623bf9 1596static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
33387738 1597 struct vnode *dvp, char *fakename);
a0d57516 1598static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
cc4c3b52 1599 struct vnode **saved_dvp);
fad57d0e 1600
28623bf9
MD
1601int
1602cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
1603 struct nchandle *nch)
fad57d0e 1604{
cc4c3b52 1605 struct vnode *saved_dvp;
fad57d0e 1606 struct vnode *pvp;
33387738 1607 char *fakename;
fad57d0e
MD
1608 int error;
1609
28623bf9
MD
1610 nch->ncp = NULL;
1611 nch->mount = dvp->v_mount;
cc4c3b52 1612 saved_dvp = NULL;
33387738 1613 fakename = NULL;
a0d57516 1614
fad57d0e 1615 /*
269a08e4
MD
1616 * Handle the makeit == 0 degenerate case
1617 */
1618 if (makeit == 0) {
287a8577 1619 spin_lock(&dvp->v_spinlock);
269a08e4
MD
1620 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
1621 if (nch->ncp)
1622 cache_hold(nch);
287a8577 1623 spin_unlock(&dvp->v_spinlock);
269a08e4
MD
1624 }
1625
1626 /*
f63911bf 1627 * Loop until resolution, inside code will break out on error.
fad57d0e 1628 */
f63911bf
MD
1629 while (makeit) {
1630 /*
1631 * Break out if we successfully acquire a working ncp.
1632 */
287a8577 1633 spin_lock(&dvp->v_spinlock);
28623bf9 1634 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
f63911bf
MD
1635 if (nch->ncp) {
1636 cache_hold(nch);
287a8577 1637 spin_unlock(&dvp->v_spinlock);
f63911bf
MD
1638 break;
1639 }
287a8577 1640 spin_unlock(&dvp->v_spinlock);
fad57d0e 1641
fad57d0e
MD
1642 /*
1643 * If dvp is the root of its filesystem it should already
1644 * have a namecache pointer associated with it as a side
1645 * effect of the mount, but it may have been disassociated.
1646 */
1647 if (dvp->v_flag & VROOT) {
28623bf9
MD
1648 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
1649 error = cache_resolve_mp(nch->mount);
1650 _cache_put(nch->ncp);
fad57d0e 1651 if (ncvp_debug) {
6ea70f76 1652 kprintf("cache_fromdvp: resolve root of mount %p error %d",
fad57d0e
MD
1653 dvp->v_mount, error);
1654 }
1655 if (error) {
1656 if (ncvp_debug)
6ea70f76 1657 kprintf(" failed\n");
28623bf9 1658 nch->ncp = NULL;
fad57d0e
MD
1659 break;
1660 }
1661 if (ncvp_debug)
6ea70f76 1662 kprintf(" succeeded\n");
fad57d0e
MD
1663 continue;
1664 }
1665
1666 /*
a0d57516
MD
1667 * If we are recursed too deeply resort to an O(n^2)
1668 * algorithm to resolve the namecache topology. The
cc4c3b52 1669 * resolved pvp is left referenced in saved_dvp to
a0d57516
MD
1670 * prevent the tree from being destroyed while we loop.
1671 */
1672 if (makeit > 20) {
cc4c3b52 1673 error = cache_fromdvp_try(dvp, cred, &saved_dvp);
a0d57516 1674 if (error) {
6ea70f76 1675 kprintf("lookupdotdot(longpath) failed %d "
a0d57516 1676 "dvp %p\n", error, dvp);
1142bff7 1677 nch->ncp = NULL;
a0d57516
MD
1678 break;
1679 }
1680 continue;
1681 }
1682
1683 /*
fad57d0e
MD
1684 * Get the parent directory and resolve its ncp.
1685 */
33387738
MD
1686 if (fakename) {
1687 kfree(fakename, M_TEMP);
1688 fakename = NULL;
1689 }
1690 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
1691 &fakename);
fad57d0e 1692 if (error) {
6ea70f76 1693 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
fad57d0e
MD
1694 break;
1695 }
a11aaa81 1696 vn_unlock(pvp);
fad57d0e
MD
1697
1698 /*
1142bff7
MD
1699 * Reuse makeit as a recursion depth counter. On success
1700 * nch will be fully referenced.
fad57d0e 1701 */
28623bf9 1702 cache_fromdvp(pvp, cred, makeit + 1, nch);
fad57d0e 1703 vrele(pvp);
28623bf9 1704 if (nch->ncp == NULL)
fad57d0e
MD
1705 break;
1706
1707 /*
1708 * Do an inefficient scan of pvp (embodied by ncp) to look
1709 * for dvp. This will create a namecache record for dvp on
 1710 * success. We then loop back up to recheck.
1711 *
1712 * ncp and dvp are both held but not locked.
1713 */
33387738 1714 error = cache_inefficient_scan(nch, cred, dvp, fakename);
fad57d0e 1715 if (error) {
6ea70f76 1716 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
28623bf9 1717 pvp, nch->ncp->nc_name, dvp);
1142bff7
MD
1718 cache_drop(nch);
1719 /* nch was NULLed out, reload mount */
1720 nch->mount = dvp->v_mount;
fad57d0e
MD
1721 break;
1722 }
1723 if (ncvp_debug) {
6ea70f76 1724 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
28623bf9 1725 pvp, nch->ncp->nc_name);
fad57d0e 1726 }
1142bff7
MD
1727 cache_drop(nch);
1728 /* nch was NULLed out, reload mount */
1729 nch->mount = dvp->v_mount;
fad57d0e 1730 }
28623bf9
MD
1731
1732 /*
f63911bf 1733 * If nch->ncp is non-NULL it will have been held already.
28623bf9 1734 */
f63911bf
MD
1735 if (fakename)
1736 kfree(fakename, M_TEMP);
cc4c3b52
MD
1737 if (saved_dvp)
1738 vrele(saved_dvp);
28623bf9
MD
1739 if (nch->ncp)
1740 return (0);
1741 return (EINVAL);
fad57d0e
MD
1742}
1743
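/*
 * Hypothetical caller sketch for cache_fromdvp(), illustrative only (the
 * function and field names are real, the caller is not part of this file).
 * It demonstrates the contract documented above: nch.ncp must be checked
 * for NULL regardless of 'makeit', and a non-NULL result is returned held
 * (not locked) and must be released with cache_drop().
 */
#if 0
static int
example_fromdvp_caller(struct vnode *dvp, struct ucred *cred)
{
	struct nchandle nch;
	int error;

	error = cache_fromdvp(dvp, cred, 1, &nch);
	if (error || nch.ncp == NULL)
		return (error ? error : EINVAL);
	/* ... use nch.ncp (e.g. nch.ncp->nc_name) ... */
	cache_drop(&nch);
	return (0);
}
#endif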
1744/*
a0d57516
MD
1745 * Go up the chain of parent directories until we find something
1746 * we can resolve into the namecache. This is very inefficient.
1747 */
1748static
1749int
1750cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
cc4c3b52 1751 struct vnode **saved_dvp)
a0d57516 1752{
28623bf9 1753 struct nchandle nch;
a0d57516
MD
1754 struct vnode *pvp;
1755 int error;
1756 static time_t last_fromdvp_report;
33387738 1757 char *fakename;
a0d57516
MD
1758
1759 /*
1760 * Loop getting the parent directory vnode until we get something we
1761 * can resolve in the namecache.
1762 */
1763 vref(dvp);
28623bf9 1764 nch.mount = dvp->v_mount;
1142bff7 1765 nch.ncp = NULL;
33387738 1766 fakename = NULL;
28623bf9 1767
a0d57516 1768 for (;;) {
33387738
MD
1769 if (fakename) {
1770 kfree(fakename, M_TEMP);
1771 fakename = NULL;
1772 }
1773 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
1774 &fakename);
a0d57516
MD
1775 if (error) {
1776 vrele(dvp);
33387738 1777 break;
a0d57516 1778 }
a11aaa81 1779 vn_unlock(pvp);
287a8577 1780 spin_lock(&pvp->v_spinlock);
28623bf9
MD
1781 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
1782 _cache_hold(nch.ncp);
287a8577 1783 spin_unlock(&pvp->v_spinlock);
a0d57516
MD
1784 vrele(pvp);
1785 break;
1786 }
287a8577 1787 spin_unlock(&pvp->v_spinlock);
a0d57516 1788 if (pvp->v_flag & VROOT) {
28623bf9
MD
1789 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
1790 error = cache_resolve_mp(nch.mount);
1791 _cache_unlock(nch.ncp);
a0d57516
MD
1792 vrele(pvp);
1793 if (error) {
28623bf9 1794 _cache_drop(nch.ncp);
1142bff7 1795 nch.ncp = NULL;
a0d57516 1796 vrele(dvp);
a0d57516
MD
1797 }
1798 break;
1799 }
1800 vrele(dvp);
1801 dvp = pvp;
1802 }
33387738
MD
1803 if (error == 0) {
1804 if (last_fromdvp_report != time_second) {
1805 last_fromdvp_report = time_second;
1806 kprintf("Warning: extremely inefficient path "
1807 "resolution on %s\n",
1808 nch.ncp->nc_name);
1809 }
1810 error = cache_inefficient_scan(&nch, cred, dvp, fakename);
cc4c3b52 1811
33387738
MD
1812 /*
1813 * Hopefully dvp now has a namecache record associated with
1814 * it. Leave it referenced to prevent the kernel from
1815 * recycling the vnode. Otherwise extremely long directory
1816 * paths could result in endless recycling.
1817 */
1818 if (*saved_dvp)
1819 vrele(*saved_dvp);
1820 *saved_dvp = dvp;
1142bff7 1821 _cache_drop(nch.ncp);
33387738
MD
1822 }
1823 if (fakename)
1824 kfree(fakename, M_TEMP);
a0d57516
MD
1825 return (error);
1826}
1827
a0d57516 1828/*
fad57d0e
MD
1829 * Do an inefficient scan of the directory represented by ncp looking for
1830 * the directory vnode dvp. ncp must be held but not locked on entry and
1831 * will be held on return. dvp must be refd but not locked on entry and
1832 * will remain refd on return.
1833 *
1834 * Why do this at all? Well, due to its stateless nature the NFS server
1835 * converts file handles directly to vnodes without necessarily going through
1836 * the namecache ops that would otherwise create the namecache topology
1837 * leading to the vnode. We could either (1) Change the namecache algorithms
 1838 * to allow disconnected namecache records that are re-merged opportunistically,
1839 * or (2) Make the NFS server backtrack and scan to recover a connected
1840 * namecache topology in order to then be able to issue new API lookups.
1841 *
1842 * It turns out that (1) is a huge mess. It takes a nice clean set of
1843 * namecache algorithms and introduces a lot of complication in every subsystem
1844 * that calls into the namecache to deal with the re-merge case, especially
1845 * since we are using the namecache to placehold negative lookups and the
1846 * vnode might not be immediately assigned. (2) is certainly far less
 1847 * efficient than (1), but since we are only talking about directories here
1848 * (which are likely to remain cached), the case does not actually run all
1849 * that often and has the supreme advantage of not polluting the namecache
1850 * algorithms.
33387738
MD
1851 *
1852 * If a fakename is supplied just construct a namecache entry using the
1853 * fake name.
fad57d0e
MD
1854 */
1855static int
28623bf9 1856cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
33387738 1857 struct vnode *dvp, char *fakename)
fad57d0e
MD
1858{
1859 struct nlcomponent nlc;
28623bf9 1860 struct nchandle rncp;
fad57d0e
MD
1861 struct dirent *den;
1862 struct vnode *pvp;
1863 struct vattr vat;
1864 struct iovec iov;
1865 struct uio uio;
fad57d0e
MD
1866 int blksize;
1867 int eofflag;
4d22f42a 1868 int bytes;
fad57d0e
MD
1869 char *rbuf;
1870 int error;
fad57d0e
MD
1871
1872 vat.va_blocksize = 0;
87de5057 1873 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
fad57d0e 1874 return (error);
2247fe02
MD
1875 cache_lock(nch);
1876 error = cache_vref(nch, cred, &pvp);
1877 cache_unlock(nch);
1878 if (error)
fad57d0e 1879 return (error);
973c11b9
MD
1880 if (ncvp_debug) {
1881 kprintf("inefficient_scan: directory iosize %ld "
1882 "vattr fileid = %lld\n",
1883 vat.va_blocksize,
1884 (long long)vat.va_fileid);
1885 }
33387738
MD
1886
1887 /*
1888 * Use the supplied fakename if not NULL. Fake names are typically
1889 * not in the actual filesystem hierarchy. This is used by HAMMER
1890 * to glue @@timestamp recursions together.
1891 */
1892 if (fakename) {
1893 nlc.nlc_nameptr = fakename;
1894 nlc.nlc_namelen = strlen(fakename);
1895 rncp = cache_nlookup(nch, &nlc);
1896 goto done;
1897 }
1898
fad57d0e
MD
1899 if ((blksize = vat.va_blocksize) == 0)
1900 blksize = DEV_BSIZE;
efda3bd0 1901 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
28623bf9 1902 rncp.ncp = NULL;
fad57d0e
MD
1903
1904 eofflag = 0;
1905 uio.uio_offset = 0;
fad57d0e 1906again:
fad57d0e
MD
1907 iov.iov_base = rbuf;
1908 iov.iov_len = blksize;
1909 uio.uio_iov = &iov;
1910 uio.uio_iovcnt = 1;
1911 uio.uio_resid = blksize;
1912 uio.uio_segflg = UIO_SYSSPACE;
1913 uio.uio_rw = UIO_READ;
1914 uio.uio_td = curthread;
1915
fad57d0e 1916 if (ncvp_debug >= 2)
6ea70f76 1917 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
4d22f42a 1918 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
fad57d0e 1919 if (error == 0) {
4d22f42a
MD
1920 den = (struct dirent *)rbuf;
1921 bytes = blksize - uio.uio_resid;
1922
1923 while (bytes > 0) {
1924 if (ncvp_debug >= 2) {
6ea70f76 1925 kprintf("cache_inefficient_scan: %*.*s\n",
4d22f42a
MD
1926 den->d_namlen, den->d_namlen,
1927 den->d_name);
1928 }
fad57d0e 1929 if (den->d_type != DT_WHT &&
01f31ab3 1930 den->d_ino == vat.va_fileid) {
4d22f42a 1931 if (ncvp_debug) {
6ea70f76 1932 kprintf("cache_inefficient_scan: "
50626622 1933 "MATCHED inode %lld path %s/%*.*s\n",
973c11b9
MD
1934 (long long)vat.va_fileid,
1935 nch->ncp->nc_name,
4d22f42a
MD
1936 den->d_namlen, den->d_namlen,
1937 den->d_name);
1938 }
fad57d0e
MD
1939 nlc.nlc_nameptr = den->d_name;
1940 nlc.nlc_namelen = den->d_namlen;
28623bf9
MD
1941 rncp = cache_nlookup(nch, &nlc);
1942 KKASSERT(rncp.ncp != NULL);
fad57d0e
MD
1943 break;
1944 }
01f31ab3
JS
1945 bytes -= _DIRENT_DIRSIZ(den);
1946 den = _DIRENT_NEXT(den);
fad57d0e 1947 }
28623bf9 1948 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
fad57d0e
MD
1949 goto again;
1950 }
33387738
MD
1951 kfree(rbuf, M_TEMP);
1952done:
885ecb13 1953 vrele(pvp);
28623bf9
MD
1954 if (rncp.ncp) {
1955 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
4b5bbb78 1956 _cache_setvp(rncp.mount, rncp.ncp, dvp);
fad57d0e 1957 if (ncvp_debug >= 2) {
6ea70f76 1958 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
28623bf9 1959 nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
fad57d0e
MD
1960 }
1961 } else {
1962 if (ncvp_debug >= 2) {
6ea70f76 1963 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
28623bf9
MD
1964 nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
1965 rncp.ncp->nc_vp);
fad57d0e
MD
1966 }
1967 }
28623bf9
MD
1968 if (rncp.ncp->nc_vp == NULL)
1969 error = rncp.ncp->nc_error;
1142bff7
MD
1970 /*
1971 * Release rncp after a successful nlookup. rncp was fully
1972 * referenced.
1973 */
1974 cache_put(&rncp);
fad57d0e 1975 } else {
6ea70f76 1976 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
28623bf9 1977 dvp, nch->ncp->nc_name);
fad57d0e
MD
1978 error = ENOENT;
1979 }
fad57d0e
MD
1980 return (error);
1981}
1982
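/*
 * Isolated sketch of the directory-block walk performed by
 * cache_inefficient_scan() above.  Illustrative only: 'buf' stands for
 * the VOP_READDIR() output buffer and 'bytes' for the number of valid
 * bytes in it (blksize - uio.uio_resid in the real code).
 */
#if 0
static void
example_walk_dirents(char *buf, int bytes, ino_t want_ino)
{
	struct dirent *den = (struct dirent *)buf;

	while (bytes > 0) {
		if (den->d_type != DT_WHT && den->d_ino == want_ino) {
			/* found the entry whose inode matches the target */
			break;
		}
		bytes -= _DIRENT_DIRSIZ(den);	/* size of this record */
		den = _DIRENT_NEXT(den);	/* advance to the next one */
	}
}
#endif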
1983/*
67773eb3
MD
1984 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
1985 * state, which disassociates it from its vnode or ncneglist.
7ea21ed1 1986 *
67773eb3 1987 * Then, if there are no additional references to the ncp and no children,
f63911bf 1988 * the ncp is removed from the topology and destroyed.
7ea21ed1 1989 *
67773eb3
MD
1990 * References and/or children may exist if the ncp is in the middle of the
1991 * topology, preventing the ncp from being destroyed.
7ea21ed1 1992 *
67773eb3
MD
1993 * This function must be called with the ncp held and locked and will unlock
1994 * and drop it during zapping.
f63911bf 1995 *
65870584
MD
1996 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
1997 * This case can occur in the cache_drop() path.
1998 *
f63911bf
MD
 1999 * This function may return a held (but NOT locked) parent node which the
2000 * caller must drop. We do this so _cache_drop() can loop, to avoid
2001 * blowing out the kernel stack.
2002 *
2003 * WARNING! For MPSAFE operation this routine must acquire up to three
2004 * spin locks to be able to safely test nc_refs. Lock order is
2005 * very important.
2006 *
2007 * hash spinlock if on hash list
2008 * parent spinlock if child of parent
2009 * (the ncp is unresolved so there is no vnode association)
984263bc 2010 */
f63911bf 2011static struct namecache *
65870584 2012cache_zap(struct namecache *ncp, int nonblock)
984263bc 2013{
7ea21ed1 2014 struct namecache *par;
f63911bf 2015 struct vnode *dropvp;
f63911bf 2016 int refs;
7ea21ed1
MD
2017
2018 /*
2019 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2020 */
28623bf9 2021 _cache_setunresolved(ncp);
7ea21ed1
MD
2022
2023 /*
2024 * Try to scrap the entry and possibly tail-recurse on its parent.
 2025 * We only scrap unref'd (other than our ref) unresolved entries,
2026 * we do not scrap 'live' entries.
f63911bf
MD
2027 *
2028 * Note that once the spinlocks are acquired if nc_refs == 1 no
2029 * other references are possible. If it isn't, however, we have
2030 * to decrement but also be sure to avoid a 1->0 transition.
7ea21ed1 2031 */
f63911bf
MD
2032 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2033 KKASSERT(ncp->nc_refs > 0);
7ea21ed1 2034
f63911bf 2035 /*
65870584
MD
2036 * Acquire locks. Note that the parent can't go away while we hold
2037 * a child locked.
f63911bf 2038 */
2247fe02 2039 if ((par = ncp->nc_parent) != NULL) {
65870584
MD
2040 if (nonblock) {
2041 for (;;) {
2042 if (_cache_lock_nonblock(par) == 0)
2043 break;
65870584
MD
2044 refs = ncp->nc_refs;
2045 ncp->nc_flag |= NCF_DEFEREDZAP;
2046 ++numdefered; /* MP race ok */
2047 if (atomic_cmpset_int(&ncp->nc_refs,
2048 refs, refs - 1)) {
2049 _cache_unlock(ncp);
2050 return(NULL);
2051 }
2052 cpu_pause();
2053 }
2054 _cache_hold(par);
2055 } else {
2056 _cache_hold(par);
2057 _cache_lock(par);
2058 }
287a8577 2059 spin_lock(&ncp->nc_head->spin);
f63911bf 2060 }
7ea21ed1 2061
f63911bf
MD
2062 /*
 2063 * If someone other than us has a ref or we have children
2064 * we cannot zap the entry. The 1->0 transition and any
2065 * further list operation is protected by the spinlocks
2066 * we have acquired but other transitions are not.
2067 */
2068 for (;;) {
2069 refs = ncp->nc_refs;
2070 if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
2071 break;
2072 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
2247fe02 2073 if (par) {
287a8577 2074 spin_unlock(&ncp->nc_head->spin);
2247fe02
MD
2075 _cache_put(par);
2076 }
f63911bf
MD
2077 _cache_unlock(ncp);
2078 return(NULL);
7ea21ed1 2079 }
2247fe02 2080 cpu_pause();
f63911bf 2081 }
67773eb3 2082
f63911bf
MD
2083 /*
2084 * We are the only ref and with the spinlocks held no further
2085 * refs can be acquired by others.
2086 *
2087 * Remove us from the hash list and parent list. We have to
2088 * drop a ref on the parent's vp if the parent's list becomes
2089 * empty.
2090 */
f63911bf 2091 dropvp = NULL;
2247fe02
MD
2092 if (par) {
2093 struct nchash_head *nchpp = ncp->nc_head;
67773eb3 2094
2247fe02
MD
2095 KKASSERT(nchpp != NULL);
2096 LIST_REMOVE(ncp, nc_hash);
2097 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
f63911bf
MD
2098 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
2099 dropvp = par->nc_vp;
2247fe02
MD
2100 ncp->nc_head = NULL;
2101 ncp->nc_parent = NULL;
287a8577 2102 spin_unlock(&nchpp->spin);
2247fe02
MD
2103 _cache_unlock(par);
2104 } else {
2105 KKASSERT(ncp->nc_head == NULL);
7ea21ed1 2106 }
f63911bf
MD
2107
2108 /*
2109 * ncp should not have picked up any refs. Physically
2110 * destroy the ncp.
2111 */
f63911bf 2112 KKASSERT(ncp->nc_refs == 1);
f63911bf
MD
2113 /* _cache_unlock(ncp) not required */
2114 ncp->nc_refs = -1; /* safety */
2115 if (ncp->nc_name)
2116 kfree(ncp->nc_name, M_VFSCACHE);
2117 kfree(ncp, M_VFSCACHE);
2118
2119 /*
2120 * Delayed drop (we had to release our spinlocks)
2121 *
2122 * The refed parent (if not NULL) must be dropped. The
2123 * caller is responsible for looping.
2124 */
2125 if (dropvp)
2126 vdrop(dropvp);
2127 return(par);
984263bc
MD
2128}
2129
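/*
 * Greatly simplified sketch of the looping pattern cache_zap() supports:
 * rather than recursing up the parent chain, the caller re-feeds the held
 * parent back into the zap path, bounding kernel stack use.  The real
 * loop lives in _cache_drop() and handles the 1->0 reference transition
 * atomically; that detail is deliberately ignored here.
 */
#if 0
static void
example_zap_chain(struct namecache *ncp)	/* passed in held + locked */
{
	struct namecache *par;

	while (ncp) {
		par = cache_zap(ncp, 0);	/* consumes our ref + lock */
		if (par == NULL)
			break;
		if (par->nc_refs > 1 || !TAILQ_EMPTY(&par->nc_list)) {
			_cache_drop(par);	/* parent still live, stop */
			break;
		}
		_cache_lock(par);		/* zap the parent as well */
		ncp = par;
	}
}
#endif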
65870584
MD
2130/*
 2131 * Clean up dangling negative cache and deferred-drop entries in the
2132 * namecache.
2133 */
9e10d70b
MD
2134typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;
2135
2136static cache_hs_t neg_cache_hysteresis_state = CHI_LOW;
2137static cache_hs_t pos_cache_hysteresis_state = CHI_LOW;
62d0f1f0 2138
62d0f1f0 2139void
65870584 2140cache_hysteresis(void)
62d0f1f0 2141{
9e10d70b
MD
2142 int poslimit;
2143
62d0f1f0
MD
2144 /*
2145 * Don't cache too many negative hits. We use hysteresis to reduce
2146 * the impact on the critical path.
2147 */
9e10d70b 2148 switch(neg_cache_hysteresis_state) {
62d0f1f0
MD
2149 case CHI_LOW:
2150 if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
65870584 2151 _cache_cleanneg(10);
9e10d70b 2152 neg_cache_hysteresis_state = CHI_HIGH;
62d0f1f0
MD
2153 }
2154 break;
2155 case CHI_HIGH:
2156 if (numneg > MINNEG * 9 / 10 &&
2157 numneg * ncnegfactor * 9 / 10 > numcache
2158 ) {
65870584 2159 _cache_cleanneg(10);
62d0f1f0 2160 } else {
9e10d70b
MD
2161 neg_cache_hysteresis_state = CHI_LOW;
2162 }
2163 break;
2164 }
2165
2166 /*
2167 * Don't cache too many positive hits. We use hysteresis to reduce
2168 * the impact on the critical path.
2169 *
2170 * Excessive positive hits can accumulate due to large numbers of
 2171 * hardlinks (the vnode cache will not prevent hardlinked ncps from growing
2172 * into infinity).
2173 */
2174 if ((poslimit = ncposlimit) == 0)
2175 poslimit = desiredvnodes * 2;
2176
2177 switch(pos_cache_hysteresis_state) {
2178 case CHI_LOW:
2179 if (numcache > poslimit && numcache > MINPOS) {
2180 _cache_cleanpos(10);
2181 pos_cache_hysteresis_state = CHI_HIGH;
2182 }
2183 break;
2184 case CHI_HIGH:
2185 if (numcache > poslimit * 5 / 6 && numcache > MINPOS) {
2186 _cache_cleanpos(10);
2187 } else {
2188 pos_cache_hysteresis_state = CHI_LOW;
62d0f1f0
MD
2189 }
2190 break;
2191 }
65870584
MD
2192
2193 /*
 2194 * Clean out dangling deferred-zap ncps which could not
 2195 * be cleanly dropped if too many build up. Note
 2196 * that numdefered is not an exact count, as such ncps
 2197 * can be reused and the counter is not handled in an MP
 2198 * safe manner by design.
2199 */
2200 if (numdefered * ncnegfactor > numcache) {
2201 _cache_cleandefered();
2202 }
62d0f1f0
MD
2203}
2204
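/*
 * Worked example of the positive-hit hysteresis above (illustrative
 * numbers only): with ncposlimit left at 0 and desiredvnodes assumed to
 * be 100000, poslimit = 200000.  Cleaning trips on when numcache first
 * exceeds 200000 (CHI_LOW -> CHI_HIGH) and keeps running in CHI_HIGH
 * until numcache falls to 5/6 of the limit (about 166667), at which
 * point the state drops back to CHI_LOW.  The gap between the two
 * thresholds is what keeps the cleaner from thrashing on the critical
 * path.
 */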
984263bc 2205/*
14c92d03
MD
2206 * NEW NAMECACHE LOOKUP API
2207 *
2247fe02
MD
2208 * Lookup an entry in the namecache. The passed par_nch must be referenced
2209 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
 2210 * is ALWAYS returned, even if the supplied component is illegal.
2211 *
fad57d0e 2212 * The resulting namecache entry should be returned to the system with
2247fe02 2213 * cache_put() or cache_unlock() + cache_drop().
14c92d03
MD
2214 *
2215 * namecache locks are recursive but care must be taken to avoid lock order
2247fe02
MD
2216 * reversals (hence why the passed par_nch must be unlocked). Locking
2217 * rules are to order for parent traversals, not for child traversals.
14c92d03
MD
2218 *
2219 * Nobody else will be able to manipulate the associated namespace (e.g.
2220 * create, delete, rename, rename-target) until the caller unlocks the
2221 * entry.
2222 *
2223 * The returned entry will be in one of three states: positive hit (non-null
2224 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
2225 * Unresolved entries must be resolved through the filesystem to associate the
 2226 * vnode and/or determine whether a positive or negative hit has occurred.
2227 *
2228 * It is not necessary to lock a directory in order to lock namespace under
2229 * that directory. In fact, it is explicitly not allowed to do that. A
2230 * directory is typically only locked when being created, renamed, or
2231 * destroyed.
2232 *
2233 * The directory (par) may be unresolved, in which case any returned child
 2234 * will likely also be marked unresolved. Likely but not guaranteed. Since
fad57d0e
MD
2235 * the filesystem lookup requires a resolved directory vnode the caller is
2236 * responsible for resolving the namecache chain top-down. This API
14c92d03
MD
2237 * specifically allows whole chains to be created in an unresolved state.
2238 */
28623bf9
MD
2239struct nchandle
2240cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
14c92d03 2241{
28623bf9 2242 struct nchandle nch;
690a3127
MD
2243 struct namecache *ncp;
2244 struct namecache *new_ncp;
f63911bf 2245 struct nchash_head *nchpp;
4b5bbb78 2246 struct mount *mp;
690a3127
MD
2247 u_int32_t hash;
2248 globaldata_t gd;
2247fe02 2249 int par_locked;
690a3127
MD
2250
2251 numcalls++;
2252 gd = mycpu;
4b5bbb78 2253 mp = par_nch->mount;
2247fe02
MD
2254 par_locked = 0;
2255
2256 /*
2257 * This is a good time to call it, no ncp's are locked by
2258 * the caller or us.
2259 */
65870584 2260 cache_hysteresis();
690a3127
MD
2261
2262 /*
690a3127
MD
2263 * Try to locate an existing entry
2264 */
2265 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
28623bf9 2266 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
690a3127 2267 new_ncp = NULL;
f63911bf 2268 nchpp = NCHHASH(hash);
690a3127 2269restart:
287a8577 2270 spin_lock(&nchpp->spin);
f63911bf 2271 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
690a3127
MD
2272 numchecks++;
2273
2274 /*
690a3127 2275 * Break out if we find a matching entry. Note that
e09206ba
MD
2276 * UNRESOLVED entries may match, but DESTROYED entries
2277 * do not.
690a3127 2278 */
28623bf9 2279 if (ncp->nc_parent == par_nch->ncp &&
690a3127 2280 ncp->nc_nlen == nlc->nlc_namelen &&
e09206ba
MD
2281 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2282 (ncp->nc_flag & NCF_DESTROYED) == 0
690a3127 2283 ) {
f63911bf 2284 _cache_hold(ncp);
287a8577 2285 spin_unlock(&nchpp->spin);
2247fe02
MD
2286 if (par_locked) {
2287 _cache_unlock(par_nch->ncp);
2288 par_locked = 0;
2289 }
2290 if (_cache_lock_special(ncp) == 0) {
4b5bbb78 2291 _cache_auto_unresolve(mp, ncp);
67773eb3 2292 if (new_ncp)
28623bf9 2293 _cache_free(new_ncp);
67773eb3
MD
2294 goto found;
2295 }
28623bf9
MD
2296 _cache_get(ncp);
2297 _cache_put(ncp);
f63911bf 2298 _cache_drop(ncp);
67773eb3 2299 goto restart;
690a3127
MD
2300 }
2301 }
2302
2303 /*
2304 * We failed to locate an entry, create a new entry and add it to
2247fe02
MD
2305 * the cache. The parent ncp must also be locked so we
2306 * can link into it.
2307 *
2308 * We have to relookup after possibly blocking in kmalloc or
2309 * when locking par_nch.
2310 *
2311 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2312 * mount case, in which case nc_name will be NULL.
690a3127
MD
2313 */
2314 if (new_ncp == NULL) {
287a8577 2315 spin_unlock(&nchpp->spin);
524c845c 2316 new_ncp = cache_alloc(nlc->nlc_namelen);
2247fe02
MD
2317 if (nlc->nlc_namelen) {
2318 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2319 nlc->nlc_namelen);
2320 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2321 }
2322 goto restart;
2323 }
2324 if (par_locked == 0) {
287a8577 2325 spin_unlock(&nchpp->spin);
2247fe02
MD
2326 _cache_lock(par_nch->ncp);
2327 par_locked = 1;
690a3127
MD
2328 goto restart;
2329 }
690a3127
MD
2330
2331 /*
2247fe02 2332 * WARNING! We still hold the spinlock. We have to set the hash
668b43c5 2333 * table entry atomically.
690a3127 2334 */
2247fe02
MD
2335 ncp = new_ncp;
2336 _cache_link_parent(ncp, par_nch->ncp, nchpp);
287a8577 2337 spin_unlock(&nchpp->spin);
2247fe02
MD
2338 _cache_unlock(par_nch->ncp);
2339 /* par_locked = 0 - not used */
690a3127 2340found:
fad57d0e
MD
2341 /*
2342 * stats and namecache size management
2343 */
2344 if (ncp->nc_flag & NCF_UNRESOLVED)
2345 ++gd->gd_nchstats->ncs_miss;
2346 else if (ncp->nc_vp)
2347 ++gd->gd_nchstats->ncs_goodhits;
2348 else
2349 ++gd->gd_nchstats->ncs_neghits;
4b5bbb78 2350 nch.mount = mp;
28623bf9 2351 nch.ncp = ncp;
61f96b6f 2352 atomic_add_int(&nch.mount->mnt_refs, 1);
28623bf9 2353 return(nch);
690a3127
MD
2354}
2355
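/*
 * Hypothetical usage sketch for cache_nlookup() (the calling function is
 * made up; everything it calls is real).  It illustrates the locking
 * contract described above: par_nch is passed referenced but unlocked,
 * and the returned nchandle is always referenced and locked.
 */
#if 0
static void
example_nlookup(struct nchandle *par_nch, char *name)
{
	struct nlcomponent nlc;
	struct nchandle nch;

	nlc.nlc_nameptr = name;
	nlc.nlc_namelen = strlen(name);
	nch = cache_nlookup(par_nch, &nlc);	/* nch.ncp is never NULL */
	if (nch.ncp->nc_flag & NCF_UNRESOLVED) {
		/* resolve through the filesystem before using nc_vp */
	}
	cache_put(&nch);			/* unlock + drop */
}
#endif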
2356/*
668b43c5
MD
 2357 * This is a non-blocking version of cache_nlookup() used by
2358 * nfs_readdirplusrpc_uio(). It can fail for any reason and
2359 * will return nch.ncp == NULL in that case.
2360 */
2361struct nchandle
2362cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
2363{
2364 struct nchandle nch;
2365 struct namecache *ncp;
2366 struct namecache *new_ncp;
2367 struct nchash_head *nchpp;
2368 struct mount *mp;
2369 u_int32_t hash;
2370 globaldata_t gd;
2371 int par_locked;
2372
2373 numcalls++;
2374 gd = mycpu;
2375 mp = par_nch->mount;
2376 par_locked = 0;
2377
2378 /*
2379 * Try to locate an existing entry
2380 */
2381 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2382 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2383 new_ncp = NULL;
2384 nchpp = NCHHASH(hash);
2385restart:
287a8577 2386 spin_lock(&nchpp->spin);
668b43c5
MD
2387 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2388 numchecks++;
2389
2390 /*
2391 * Break out if we find a matching entry. Note that
2392 * UNRESOLVED entries may match, but DESTROYED entries
2393 * do not.
2394 */
2395 if (ncp->nc_parent == par_nch->ncp &&
2396 ncp->nc_nlen == nlc->nlc_namelen &&
2397 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2398 (ncp->nc_flag & NCF_DESTROYED) == 0
2399 ) {
2400 _cache_hold(ncp);
287a8577 2401 spin_unlock(&nchpp->spin);
668b43c5
MD
2402 if (par_locked) {
2403 _cache_unlock(par_nch->ncp);
2404 par_locked = 0;
2405 }
2406 if (_cache_lock_special(ncp) == 0) {
2407 _cache_auto_unresolve(mp, ncp);
2408 if (new_ncp) {
2409 _cache_free(new_ncp);
2410 new_ncp = NULL;
2411 }
2412 goto found;
2413 }
2414 _cache_drop(ncp);
2415 goto failed;
2416 }
2417 }
2418
2419 /*
2420 * We failed to locate an entry, create a new entry and add it to
2421 * the cache. The parent ncp must also be locked so we
2422 * can link into it.
2423 *
2424 * We have to relookup after possibly blocking in kmalloc or
2425 * when locking par_nch.
2426 *
2427 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2428 * mount case, in which case nc_name will be NULL.
2429 */
2430 if (new_ncp == NULL) {
287a8577 2431 spin_unlock(&nchpp->spin);
668b43c5
MD
2432 new_ncp = cache_alloc(nlc->nlc_namelen);
2433 if (nlc->nlc_namelen) {
2434 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2435 nlc->nlc_namelen);
2436 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2437 }
2438 goto restart;
2439 }
2440 if (par_locked == 0) {
287a8577 2441 spin_unlock(&nchpp->spin);
668b43c5
MD
2442 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
2443 par_locked = 1;
2444 goto restart;
2445 }
2446 goto failed;
2447 }
2448
2449 /*
2450 * WARNING! We still hold the spinlock. We have to set the hash
2451 * table entry atomically.
2452 */
2453 ncp = new_ncp;
2454 _cache_link_parent(ncp, par_nch->ncp, nchpp);
287a8577 2455 spin_unlock(&nchpp->spin);
668b43c5
MD
2456 _cache_unlock(par_nch->ncp);
2457 /* par_locked = 0 - not used */
2458found:
2459 /*
2460 * stats and namecache size management
2461 */
2462 if (ncp->nc_flag & NCF_UNRESOLVED)
2463 ++gd->gd_nchstats->ncs_miss;
2464 else if (ncp->nc_vp)
2465 ++gd->gd_nchstats->ncs_goodhits;
2466 else
2467 ++gd->gd_nchstats->ncs_neghits;
2468 nch.mount = mp;
2469 nch.ncp = ncp;
2470 atomic_add_int(&nch.mount->mnt_refs, 1);
2471 return(nch);
2472failed:
2473 if (new_ncp) {
2474 _cache_free(new_ncp);
2475 new_ncp = NULL;
2476 }
2477 nch.mount = NULL;
2478 nch.ncp = NULL;
2479 return(nch);
2480}
2481
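/*
 * Hypothetical sketch for the non-blocking variant above: unlike
 * cache_nlookup(), failure is reported by nch.ncp == NULL and there is
 * nothing to release in that case.
 */
#if 0
static int
example_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;

	nch = cache_nlookup_nonblock(par_nch, nlc);
	if (nch.ncp == NULL)
		return (EWOULDBLOCK);	/* fall back to cache_nlookup() */
	/* ... */
	cache_put(&nch);
	return (0);
}
#endif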
2482/*
28623bf9
MD
2483 * The namecache entry is marked as being used as a mount point.
2484 * Locate the mount if it is visible to the caller.
1d505369 2485 */
28623bf9
MD
2486struct findmount_info {
2487 struct mount *result;
2488 struct mount *nch_mount;
2489 struct namecache *nch_ncp;
2490};
2491
2492static
2493int
2494cache_findmount_callback(struct mount *mp, void *data)
1d505369 2495{
28623bf9 2496 struct findmount_info *info = data;
1d505369 2497
28623bf9
MD
2498 /*
2499 * Check the mount's mounted-on point against the passed nch.
2500 */
2501 if (mp->mnt_ncmounton.mount == info->nch_mount &&
2502 mp->mnt_ncmounton.ncp == info->nch_ncp
2503 ) {
2504 info->result = mp;
2505 return(-1);
1d505369 2506 }
28623bf9 2507 return(0);
1d505369
MD
2508}
2509
28623bf9
MD
2510struct mount *
2511cache_findmount(struct nchandle *nch)
9b1b3591 2512{
28623bf9
MD
2513 struct findmount_info info;
2514
2515 info.result = NULL;
2516 info.nch_mount = nch->mount;
2517 info.nch_ncp = nch->ncp;
2518 mountlist_scan(cache_findmount_callback, &info,
2519 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
2520 return(info.result);
9b1b3591
MD
2521}
2522
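/*
 * Hypothetical sketch: check whether another filesystem is mounted on top
 * of the namecache entry described by nch.  cache_findmount() walks the
 * mount list and returns the covering mount, or NULL if there is none.
 */
#if 0
static void
example_findmount(struct nchandle *nch)
{
	struct mount *mp;

	mp = cache_findmount(nch);
	if (mp) {
		/* nch is a mount point; continue at mp->mnt_ncmountpt */
	}
}
#endif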
2523/*
21739618 2524 * Resolve an unresolved namecache entry, generally by looking it up.
67773eb3 2525 * The passed ncp must be locked and refd.
21739618
MD
2526 *
2527 * Theoretically since a vnode cannot be recycled while held, and since
2528 * the nc_parent chain holds its vnode as long as children exist, the
2529 * direct parent of the cache entry we are trying to resolve should
2530 * have a valid vnode. If not then generate an error that we can
2531 * determine is related to a resolver bug.
fad57d0e 2532 *
9b1b3591
MD
 2533 * However, if a vnode was in the middle of being recycled when the NCP
2534 * got locked, ncp->nc_vp might point to a vnode that is about to become
2535 * invalid. cache_resolve() handles this case by unresolving the entry
2536 * and then re-resolving it.
2537 *
fad57d0e
MD
2538 * Note that successful resolution does not necessarily return an error
2539 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
2540 * will be returned.
2247fe02
MD
2541 *
2542 * MPSAFE
690a3127
MD
2543 */
2544int
28623bf9 2545cache_resolve(struct nchandle *nch, struct ucred *cred)
690a3127 2546{
2247fe02 2547 struct namecache *par_tmp;
21739618 2548 struct namecache *par;
28623bf9
MD
2549 struct namecache *ncp;
2550 struct nchandle nctmp;
2551 struct mount *mp;
dff430ab 2552 struct vnode *dvp;
67773eb3 2553 int error;
8e005a45 2554
28623bf9
MD
2555 ncp = nch->ncp;
2556 mp = nch->mount;
67773eb3 2557restart:
8e005a45 2558 /*
9b1b3591
MD
2559 * If the ncp is already resolved we have nothing to do. However,
 2560 * we do want to guarantee that a usable vnode is returned when
2561 * a vnode is present, so make sure it hasn't been reclaimed.
8e005a45 2562 */
9b1b3591
MD
2563 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2564 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
28623bf9 2565 _cache_setunresolved(ncp);
9b1b3591
MD
2566 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
2567 return (ncp->nc_error);
2568 }
21739618 2569
646a1cda
MD
2570 /*
2571 * Mount points need special handling because the parent does not
2572 * belong to the same filesystem as the ncp.
2573 */
28623bf9
MD
2574 if (ncp == mp->mnt_ncmountpt.ncp)
2575 return (cache_resolve_mp(mp));
646a1cda
MD
2576
2577 /*
2578 * We expect an unbroken chain of ncps to at least the mount point,
2579 * and even all the way to root (but this code doesn't have to go
2580 * past the mount point).
2581 */
2582 if (ncp->nc_parent == NULL) {
6ea70f76 2583 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
646a1cda 2584 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
21739618 2585 ncp->nc_error = EXDEV;
646a1cda
MD
2586 return(ncp->nc_error);
2587 }
2588
2589 /*
2590 * The vp's of the parent directories in the chain are held via vhold()
 2591 * due to the existence of the child, and should not disappear.
2592 * However, there are cases where they can disappear:
2593 *
2594 * - due to filesystem I/O errors.
2595 * - due to NFS being stupid about tracking the namespace and
2596 * destroys the namespace for entire directories quite often.
2597 * - due to forced unmounts.
e09206ba 2598 * - due to an rmdir (parent will be marked DESTROYED)
646a1cda
MD
2599 *
2600 * When this occurs we have to track the chain backwards and resolve
2601 * it, looping until the resolver catches up to the current node. We
2602 * could recurse here but we might run ourselves out of kernel stack
 2603 * so we do it in a more painful manner. This situation really should
 2604 * not occur all that often and, when it does, should not have to go
 2605 * back too many nodes to resolve the ncp.
2606 */
5312fa43 2607 while ((dvp = cache_dvpref(ncp)) == NULL) {
e09206ba
MD
2608 /*
2609 * This case can occur if a process is CD'd into a
2610 * directory which is then rmdir'd. If the parent is marked
2611 * destroyed there is no point trying to resolve it.
2612 */
2613 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
2614 return(ENOENT);
646a1cda 2615 par = ncp->nc_parent;
2247fe02
MD
2616 _cache_hold(par);
2617 _cache_lock(par);
2618 while ((par_tmp = par->nc_parent) != NULL &&
2619 par_tmp->nc_vp == NULL) {
2620 _cache_hold(par_tmp);
2621 _cache_lock(par_tmp);
2622 _cache_put(par);
2623 par = par_tmp;
2624 }
646a1cda 2625 if (par->nc_parent == NULL) {
6ea70f76 2626 kprintf("EXDEV case 2 %*.*s\n",
646a1cda 2627 par->nc_nlen, par->nc_nlen, par->nc_name);
2247fe02 2628 _cache_put(par);
646a1cda
MD
2629 return (EXDEV);
2630 }
6ea70f76 2631 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
646a1cda
MD
2632 par->nc_nlen, par->nc_nlen, par->nc_name);
2633 /*
67773eb3
MD
2634 * The parent is not set in stone, ref and lock it to prevent
2635 * it from disappearing. Also note that due to renames it
2636 * is possible for our ncp to move and for par to no longer
2637 * be one of its parents. We resolve it anyway, the loop
2638 * will handle any moves.
646a1cda 2639 */
2247fe02
MD
2640 _cache_get(par); /* additional hold/lock */
2641 _cache_put(par); /* from earlier hold/lock */
28623bf9
MD
2642 if (par == nch->mount->mnt_ncmountpt.ncp) {
2643 cache_resolve_mp(nch->mount);
c0c70b27 2644 } else if ((dvp = cache_dvpref(par)) == NULL) {
6ea70f76 2645 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
28623bf9 2646 _cache_put(par);
8e005a45 2647 continue;
c0c70b27
MD
2648 } else {
2649 if (par->nc_flag & NCF_UNRESOLVED) {
2650 nctmp.mount = mp;
2651 nctmp.ncp = par;
2652 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
2653 }
5312fa43 2654 vrele(dvp);
646a1cda 2655 }
67773eb3
MD
2656 if ((error = par->nc_error) != 0) {
2657 if (par->nc_error != EAGAIN) {
6ea70f76 2658 kprintf("EXDEV case 3 %*.*s error %d\n",
67773eb3
MD
2659 par->nc_nlen, par->nc_nlen, par->nc_name,
2660 par->nc_error);
28623bf9 2661 _cache_put(par);
67773eb3
MD
2662 return(error);
2663 }
6ea70f76 2664 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
67773eb3 2665 par, par->nc_nlen, par->nc_nlen, par->nc_name);
646a1cda 2666 }
28623bf9 2667 _cache_put(par);
67773eb3 2668 /* loop */
646a1cda 2669 }
8e005a45
MD
2670
2671 /*
fad57d0e 2672 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
8e005a45
MD
2673 * ncp's and reattach them. If this occurs the original ncp is marked
2674 * EAGAIN to force a relookup.
fad57d0e
MD
2675 *
2676 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
2677 * ncp must already be resolved.
8e005a45 2678 */
5312fa43 2679 if (dvp) {
c0c70b27
MD
2680 nctmp.mount = mp;
2681 nctmp.ncp = ncp;
2682 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
5312fa43 2683 vrele(dvp);
c0c70b27
MD
2684 } else {
2685 ncp->nc_error = EPERM;
2686 }
67773eb3 2687 if (ncp->nc_error == EAGAIN) {
6ea70f76 2688 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
67773eb3
MD
2689 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
2690 goto restart;
2691 }
646a1cda
MD
2692 return(ncp->nc_error);
2693}
2694
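/*
 * Hypothetical caller sketch for cache_resolve(), matching the semantics
 * documented above: the nchandle must be locked and referenced, 0 means a
 * positive hit (nc_vp usable), ENOENT means a resolved negative hit, and
 * any other value is a genuine error.
 */
#if 0
static int
example_resolve(struct nchandle *nch, struct ucred *cred)
{
	int error;

	error = cache_resolve(nch, cred);
	if (error == 0) {
		/* positive hit: nch->ncp->nc_vp is valid */
	} else if (error == ENOENT) {
		/* negative hit: the name is known not to exist */
	}
	return (error);
}
#endif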
2695/*
2696 * Resolve the ncp associated with a mount point. Such ncp's almost always
 2697 * remain resolved and this routine is rarely called. NFS MPs tend to force
 2698 * re-resolution more often due to NFS's mack-truck-smash-the-namecache
2699 * method of tracking namespace changes.
2700 *
6215aa92
MD
2701 * The semantics for this call is that the passed ncp must be locked on
2702 * entry and will be locked on return. However, if we actually have to
2703 * resolve the mount point we temporarily unlock the entry in order to
2704 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
2705 * the unlock we have to recheck the flags after we relock.
646a1cda
MD
2706 */
2707static int
28623bf9 2708cache_resolve_mp(struct mount *mp)
646a1cda 2709{
28623bf9 2710 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
646a1cda 2711 struct vnode *vp;
6215aa92 2712 int error;
646a1cda
MD
2713
2714 KKASSERT(mp != NULL);
9b1b3591
MD
2715
2716 /*
2717 * If the ncp is already resolved we have nothing to do. However,
 2718 * we do want to guarantee that a usable vnode is returned when
2719 * a vnode is present, so make sure it hasn't been reclaimed.
2720 */
2721 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2722 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
28623bf9 2723 _cache_setunresolved(ncp);
9b1b3591
MD
2724 }
2725
646a1cda 2726 if (ncp->nc_flag & NCF_UNRESOLVED) {
28623bf9 2727 _cache_unlock(ncp);
f9642f56 2728 while (vfs_busy(mp, 0))
646a1cda 2729 ;
6215aa92 2730 error = VFS_ROOT(mp, &vp);
28623bf9 2731 _cache_lock(ncp);
6215aa92
MD
2732
2733 /*
2734 * recheck the ncp state after relocking.
2735 */
2736 if (ncp->nc_flag & NCF_UNRESOLVED) {
2737 ncp->nc_error = error;
2738 if (error == 0) {
4b5bbb78 2739 _cache_setvp(mp, ncp, vp);
6215aa92
MD
2740 vput(vp);
2741 } else {
341a6e45
MD
2742 kprintf("[diagnostic] cache_resolve_mp: failed"
2743 " to resolve mount %p err=%d ncp=%p\n",
2744 mp, error, ncp);
4b5bbb78 2745 _cache_setvp(mp, ncp, NULL);
6215aa92
MD
2746 }
2747 } else if (error == 0) {
646a1cda 2748 vput(vp);
646a1cda 2749 }
f9642f56 2750 vfs_unbusy(mp);
21739618
MD
2751 }
2752 return(ncp->nc_error);
14c92d03
MD
2753}
2754
f63911bf 2755/*
65870584
MD
2756 * Clean out negative cache entries when too many have accumulated.
2757 *
f63911bf
MD
2758 * MPSAFE
2759 */
65870584
MD
2760static void
2761_cache_cleanneg(int count)
62d0f1f0
MD
2762{
2763 struct namecache *ncp;
7ea21ed1
MD
2764
2765 /*
62d0f1f0
MD
2766 * Attempt to clean out the specified number of negative cache
2767 * entries.
2768 */
2769 while (count) {
287a8577 2770 spin_lock(&ncspin);
7ea21ed1 2771 ncp = TAILQ_FIRST(&ncneglist);
eb82ae62 2772 if (ncp == NULL) {
287a8577 2773 spin_unlock(&ncspin);
eb82ae62
MD
2774 break;
2775 }
62d0f1f0
MD
2776 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
2777 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
f63911bf 2778 _cache_hold(ncp);
287a8577 2779 spin_unlock(&ncspin);
2247fe02 2780 if (_cache_lock_special(ncp) == 0) {
b8587f8c 2781 ncp = cache_zap(ncp, 1);
f63911bf
MD
2782 if (ncp)
2783 _cache_drop(ncp);
2784 } else {
2785 _cache_drop(ncp);
2786 }
62d0f1f0 2787 --count;
984263bc
MD
2788 }
2789}
2790
fad57d0e 2791/*
9e10d70b
MD
2792 * Clean out positive cache entries when too many have accumulated.
2793 *
2794 * MPSAFE
2795 */
2796static void
2797_cache_cleanpos(int count)
2798{
2799 static volatile int rover;
2800 struct nchash_head *nchpp;
2801 struct namecache *ncp;
2802 int rover_copy;
2803
2804 /*
 2805 * Attempt to clean out the specified number of positive cache
2806 * entries.
2807 */
2808 while (count) {
2809 rover_copy = ++rover; /* MPSAFEENOUGH */
2810 nchpp = NCHHASH(rover_copy);
2811
2812 spin_lock(&nchpp->spin);
2813 ncp = LIST_FIRST(&nchpp->list);
2814 if (ncp)
2815 _cache_hold(ncp);
2816 spin_unlock(&nchpp->spin);
2817
2818 if (ncp) {
2819 if (_cache_lock_special(ncp) == 0) {
2820 ncp = cache_zap(ncp, 1);
2821 if (ncp)
2822 _cache_drop(ncp);
2823 } else {
2824 _cache_drop(ncp);
2825 }
2826 }
2827 --count;
2828 }
2829}
2830
2831/*
65870584
MD
2832 * This is a kitchen sink function to clean out ncps which we
2833 * tried to zap from cache_drop() but failed because we were
2834 * unable to acquire the parent lock.
2835 *
2836 * Such entries can also be removed via cache_inval_vp(), such
2837 * as when unmounting.
2838 *
2839 * MPSAFE
2840 */
2841static void
2842_cache_cleandefered(void)
2843{
2844 struct nchash_head *nchpp;
2845 struct namecache *ncp;
2846 struct namecache dummy;
2847 int i;
2848
055f5cc8 2849 numdefered = 0;
65870584
MD
2850 bzero(&dummy, sizeof(dummy));
2851 dummy.nc_flag = NCF_DESTROYED;
2852
2853 for (i = 0; i <= nchash; ++i) {
2854 nchpp = &nchashtbl[i];
2855
287a8577 2856 spin_lock(&nchpp->spin);
65870584
MD
2857 LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
2858 ncp = &dummy;
2859 while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
2860 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
2861 continue;
2862 LIST_REMOVE(&dummy, nc_hash);
2863 LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
2864 _cache_hold(ncp);
287a8577 2865 spin_unlock(&nchpp->spin);
055f5cc8
MD
2866 if (_cache_lock_nonblock(ncp) == 0) {
2867 ncp->nc_flag &= ~NCF_DEFEREDZAP;
2868 _cache_unlock(ncp);
2869 }
65870584 2870 _cache_drop(ncp);
287a8577 2871 spin_lock(&nchpp->spin);
65870584
MD
2872 ncp = &dummy;
2873 }
2874 LIST_REMOVE(&dummy, nc_hash);
287a8577 2875 spin_unlock(&nchpp->spin);
65870584
MD
2876 }
2877}
2878
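/*
 * Isolated sketch of the placeholder trick used by _cache_cleandefered()
 * above: a dummy namecache node is threaded into the hash chain so the
 * scan position survives dropping the bucket spinlock around work that
 * may block.  Illustrative only; visit() stands in for the real per-entry
 * processing.
 */
#if 0
static void
example_scan_bucket(struct nchash_head *nchpp,
		    void (*visit)(struct namecache *ncp))
{
	struct namecache dummy;
	struct namecache *ncp;

	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED;		/* never matches a lookup */

	spin_lock(&nchpp->spin);
	LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
	ncp = &dummy;
	while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
		/* park the marker just past the entry we are visiting */
		LIST_REMOVE(&dummy, nc_hash);
		LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
		_cache_hold(ncp);
		spin_unlock(&nchpp->spin);
		visit(ncp);			/* may block */
		_cache_drop(ncp);
		spin_lock(&nchpp->spin);
		ncp = &dummy;			/* resume from the marker */
	}
	LIST_REMOVE(&dummy, nc_hash);
	spin_unlock(&nchpp->spin);
}
#endif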
2879/*
24e51f36 2880 * Name cache initialization, from vfsinit() when we are booting
984263bc
MD
2881 */
2882void
8987aad7 2883nchinit(void)
984263bc 2884{
24e51f36
HP
2885 int i;
2886 globaldata_t gd;
2887
2888 /* initialise per-cpu namecache effectiveness statistics. */
2889 for (i = 0; i < ncpus; ++i) {
2890 gd = globaldata_find(i);
2891 gd->gd_nchstats = &nchstats[i];
2892 }
7ea21ed1 2893 TAILQ_INIT(&ncneglist);
f63911bf 2894 spin_init(&ncspin);
9e10d70b
MD
2895 nchashtbl = hashinit_ext(desiredvnodes / 2,
2896 sizeof(struct nchash_head),
f63911bf
MD
2897 M_VFSCACHE, &nchash);
2898 for (i = 0; i <= (int)nchash; ++i) {
2899 LIST_INIT(&nchashtbl[i].list);
2900 spin_init(&nchashtbl[i].spin);
2901 }
17bde83a 2902 nclockwarn = 5 * hz;
21739618
MD
2903}
2904
2905/*
2906 * Called from start_init() to bootstrap the root filesystem. Returns
2907 * a referenced, unlocked namecache record.
2908 */
28623bf9
MD
2909void
2910cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
21739618 2911{
28623bf9
MD
2912 nch->ncp = cache_alloc(0);
2913 nch->mount = mp;
61f96b6f 2914 atomic_add_int(&mp->mnt_refs, 1);
28623bf9 2915 if (vp)
4b5bbb78 2916 _cache_setvp(nch->mount, nch->ncp, vp);
984263bc
MD
2917}
2918
2919/*
7ea21ed1 2920 * vfs_cache_setroot()
984263bc 2921 *
7ea21ed1
MD
2922 * Create an association between the root of our namecache and
2923 * the root vnode. This routine may be called several times during
2924 * booting.
690a3127
MD
2925 *
2926 * If the caller intends to save the returned namecache pointer somewhere
2927 * it must cache_hold() it.
7ea21ed1 2928 */
21739618 2929void
28623bf9 2930vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
7ea21ed1 2931{
21739618 2932 struct vnode *ovp;
28623bf9 2933 struct nchandle onch;
21739618
MD
2934
2935 ovp = rootvnode;
28623bf9 2936 onch = rootnch;
21739618 2937 rootvnode = nvp;
28623bf9
MD
2938 if (nch)
2939 rootnch = *nch;
2940 else
2941 cache_zero(&rootnch);
21739618
MD
2942 if (ovp)
2943 vrele(ovp);
28623bf9
MD
2944 if (onch.ncp)
2945 cache_drop(&onch);
7ea21ed1
MD
2946}
2947
2948/*
fad57d0e
MD
2949 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
2950 * topology and is being removed as quickly as possible. The new VOP_N*()
2951 * API calls are required to make specific adjustments using the supplied
 2952 * ncp pointers rather than just bogusly purging random vnodes.
2953 *
7ea21ed1
MD
2954 * Invalidate all namecache entries to a particular vnode as well as
2955 * any direct children of that vnode in the namecache. This is a
2956 * 'catch all' purge used by filesystems that do not know any better.
2957 *
7ea21ed1
MD
2958 * Note that the linkage between the vnode and its namecache entries will
2959 * be removed, but the namecache entries themselves might stay put due to
 2960 * active references from elsewhere in the system or due to the existence of
2961 * the children. The namecache topology is left intact even if we do not
2962 * know what the vnode association is. Such entries will be marked
2963 * NCF_UNRESOLVED.
984263bc 2964 */
984263bc 2965void
8987aad7 2966cache_purge(struct vnode *vp)
984263bc 2967{
6b008938 2968 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
984263bc
MD
2969}
2970
2971/*
2972 * Flush all entries referencing a particular filesystem.
2973 *
2974 * Since we need to check it anyway, we will flush all the invalid
2975 * entries at the same time.
2976 */
28623bf9
MD
2977#if 0
2978
984263bc 2979void
8987aad7 2980cache_purgevfs(struct mount *mp)
984263bc 2981{
f63911bf 2982 struct nchash_head *nchpp;
984263bc
MD
2983 struct namecache *ncp, *nnp;
2984
7ea21ed1
MD
2985 /*
2986 * Scan hash tables for applicable entries.
2987 */
bc0c094e 2988 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
f63911bf
MD
2989 spin_lock_wr(&nchpp->spin); XXX
2990 ncp = LIST_FIRST(&nchpp->list);
7ea21ed1 2991 if (ncp)
28623bf9 2992 _cache_hold(ncp);
7ea21ed1 2993 while (ncp) {
984263bc 2994 nnp = LIST_NEXT(ncp, nc_hash);
7ea21ed1 2995 if (nnp)
28623bf9 2996 _cache_hold(nnp);
4fcb1cf7 2997 if (ncp->nc_mount == mp) {
28623bf9 2998 _cache_lock(ncp);
65870584 2999 ncp = cache_zap(ncp, 0);
f63911bf
MD
3000 if (ncp)
3001 _cache_drop(ncp);
67773eb3 3002 } else {
28623bf9 3003 _cache_drop(ncp);
67773eb3 3004 }
7ea21ed1 3005 ncp = nnp;
984263bc 3006 }
f63911bf 3007 spin_unlock_wr(&nchpp->spin); XXX
984263bc
MD
3008 }
3009}
3010
28623bf9
MD
3011#endif
3012
984263bc 3013static int disablecwd;
0c52fa62
SG
3014SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
3015 "Disable getcwd");
984263bc
MD
3016
3017static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
3018static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
3019static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
3020static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
3021static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
3022static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
41c20dac 3023
3919ced0
MD
3024/*
3025 * MPALMOSTSAFE
3026 */
984263bc 3027int
753fd850 3028sys___getcwd(struct __getcwd_args *uap)
984263bc 3029{
2d63aca1 3030 u_int buflen;
63f58b90 3031 int error;
02680f1b
MD
3032 char *buf;
3033 char *bp;
3034
3035 if (disablecwd)
3036 return (ENODEV);
3037
3038 buflen = uap->buflen;
2ce1f68b 3039 if (buflen == 0)
02680f1b
MD
3040 return (EINVAL);
3041 if (buflen > MAXPATHLEN)
3042 buflen = MAXPATHLEN;
63f58b90 3043
efda3bd0 3044 buf = kmalloc(buflen, M_TEMP, M_WAITOK);
3919ced0 3045 get_mplock();
02680f1b 3046 bp = kern_getcwd(buf, buflen, &error);
3919ced0 3047 rel_mplock();
63f58b90 3048 if (error == 0)
02680f1b 3049 error = copyout(bp, uap->buf, strlen(bp) + 1);
efda3bd0 3050 kfree(buf, M_TEMP);
63f58b90
EN
3051 return (error);
3052}
3053
02680f1b
MD
3054char *
3055kern_getcwd(char *buf, size_t buflen, int *error)
63f58b90 3056{
41c20dac 3057 struct proc *p = curproc;
63f58b90 3058 char *bp;
02680f1b 3059 int i, slash_prefixed;
984263bc 3060 struct filedesc *fdp;
28623bf9 3061 struct nchandle nch;
2247fe02 3062 struct namecache *ncp;
984263bc
MD
3063
3064 numcwdcalls++;
63f58b90
EN
3065 bp = buf;
3066 bp += buflen - 1;
984263bc
MD
3067 *bp = '\0';
3068 fdp = p->p_fd;
3069 slash_prefixed = 0;
524c845c 3070
28623bf9 3071 nch = fdp->fd_ncdir;
2247fe02
MD
3072 ncp = nch.ncp;
3073 if (ncp)
3074 _cache_hold(ncp);
3075
3076 while (ncp && (ncp != fdp->fd_nrdir.ncp ||
28623bf9
MD
3077 nch.mount != fdp->fd_nrdir.mount)
3078 ) {
3079 /*
3080 * While traversing upwards if we encounter the root
3081 * of the current mount we have to skip to the mount point
3082 * in the underlying filesystem.
3083 */
2247fe02 3084 if (ncp == nch.mount->mnt_ncmountpt.ncp) {
28623bf9 3085 nch = nch.mount->mnt_ncmounton;
2247fe02
MD
3086 _cache_drop(ncp);
3087 ncp = nch.ncp;
3088 if (ncp)
3089 _cache_hold(ncp);
984263bc
MD
3090 continue;
3091 }
28623bf9
MD
3092
3093 /*
3094 * Prepend the path segment
3095 */
2247fe02 3096 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
984263bc
MD
3097 if (bp == buf) {
3098 numcwdfail4++;
2ce1f68b 3099 *error = ERANGE;
2247fe02
MD
3100 bp = NULL;
3101 goto done;
984263bc 3102 }
2247fe02 3103 *--bp = ncp->nc_name[i];
984263bc
MD
3104 }
3105 if (bp == buf) {
3106 numcwdfail4++;
2ce1f68b 3107 *error = ERANGE;
2247fe02
MD
3108 bp = NULL;
3109 goto done;
984263bc
MD
3110 }
3111 *--bp = '/';
3112 slash_prefixed = 1;
28623bf9
MD
3113
3114 /*
3115 * Go up a directory. This isn't a mount point so we don't
3116 * have to check again.
3117 */
2247fe02
MD
3118 while ((nch.ncp = ncp->nc_parent) != NULL) {
3119 _cache_lock(ncp);
3120 if (nch.ncp != ncp->nc_parent) {
3121 _cache_unlock(ncp);
3122 continue;
3123 }
3124 _cache_hold(nch.ncp);
3125 _cache_unlock(ncp);
3126 break;
3127 }
3128 _cache_drop(ncp);
3129 ncp = nch.ncp;
524c845c 3130 }
2247fe02 3131 if (ncp == NULL) {
524c845c
MD
3132 numcwdfail2++;
3133 *error = ENOENT;
2247fe02
MD
3134 bp = NULL;
3135 goto done;
984263bc
MD
3136 }
3137 if (!slash_prefixed) {
3138 if (bp == buf) {
3139 numcwdfail4++;
2ce1f68b 3140 *error = ERANGE;
2247fe02
MD
3141 bp = NULL;
3142 goto done;
984263bc
MD
3143 }
3144 *--bp = '/';
3145 }
3146 numcwdfound++;
02680f1b 3147 *error = 0;
2247fe02
MD
3148done:
3149 if (ncp)
3150 _cache_drop(ncp);
02680f1b 3151 return (bp);
984263bc
MD
3152}
3153
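/*
 * Isolated sketch of the buffer strategy shared by kern_getcwd() above and
 * cache_fullpath() below: path components are discovered leaf-to-root, so
 * the string is written backwards from the end of the buffer and the
 * result is wherever the last prepend stopped.  Locking, refcounting and
 * mount-point crossings are deliberately omitted here.
 */
#if 0
static char *
example_prepend_path(char *buf, size_t buflen, struct namecache *ncp)
{
	char *bp = buf + buflen - 1;
	int i;

	*bp = '\0';
	while (ncp && ncp->nc_nlen) {
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf)
				return (NULL);	/* ERANGE in the real code */
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf)
			return (NULL);
		*--bp = '/';
		ncp = ncp->nc_parent;	/* real code revalidates under lock */
	}
	return (bp);
}
#endif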
3154/*
3155 * Thus begins the fullpath magic.
2247fe02
MD
3156 *
3157 * The passed nchp is referenced but not locked.
984263bc 3158 */
984263bc
MD
3159#undef STATNODE
3160#define STATNODE(name) \
3161 static u_int name; \
3162 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
3163
3164static int disablefullpath;
3165SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
0c52fa62
SG
3166 &disablefullpath, 0,
3167 "Disable fullpath lookups");
984263bc
MD
3168
3169STATNODE(numfullpathcalls);
3170STATNODE(numfullpathfail1);
3171STATNODE(numfullpathfail2);
3172STATNODE(numfullpathfail3);
3173STATNODE(numfullpathfail4);
3174STATNODE(numfullpathfound);
3175
3176int
2247fe02 3177cache_fullpath(struct proc *p, struct nchandle *nchp,
5b4cfb7e 3178 char **retbuf, char **freebuf, int guess)
8987aad7 3179{
28623bf9
MD
3180 struct nchandle fd_nrdir;
3181 struct nchandle nch;
f63911bf 3182 struct namecache *ncp;
5b4cfb7e 3183 struct mount *mp, *new_mp;
f63911bf
MD
3184 char *bp, *buf;
3185 int slash_prefixed;
3186 int error = 0;
3187 int i;
984263bc 3188
f63911bf 3189 atomic_add_int(&numfullpathcalls, -1);
b310dfc4 3190
28623bf9
MD
3191 *retbuf = NULL;
3192 *freebuf = NULL;
3193
efda3bd0 3194 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
984263bc
MD
3195 bp = buf + MAXPATHLEN - 1;
3196 *bp = '\0';
75ffff0d
JS
3197 if (p != NULL)
3198 fd_nrdir = p->p_fd->fd_nrdir;
3199 else
28623bf9 3200 fd_nrdir = rootnch;
984263bc 3201 slash_prefixed = 0;
2247fe02 3202 nch = *nchp;
f63911bf 3203 ncp = nch.ncp;
2247fe02
MD
3204 if (ncp)
3205 _cache_hold(ncp);
f63911bf 3206 mp = nch.mount;
28623bf9 3207
f63911bf 3208 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
5b4cfb7e
AH
3209 new_mp = NULL;
3210
3211 /*
3212 * If we are asked to guess the upwards path, we do so whenever
3213 * we encounter an ncp marked as a mountpoint. We try to find
3214 * the actual mountpoint by finding the mountpoint with this ncp.
3215 */
3216 if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
3217 new_mp = mount_get_by_nc(ncp);
3218 }
28623bf9
MD
3219 /*
3220 * While traversing upwards if we encounter the root
3221 * of the current mount we have to skip to the mount point.
3222 */
f63911bf 3223 if (ncp == mp->mnt_ncmountpt.ncp) {
5b4cfb7e
AH
3224 new_mp = mp;
3225 }
3226 if (new_mp) {
5b4cfb7e 3227 nch = new_mp->mnt_ncmounton;
2247fe02 3228 _cache_drop(ncp);
f63911bf 3229 ncp = nch.ncp;
2247fe02
MD
3230 if (ncp)
3231 _cache_hold(ncp);
f63911bf 3232 mp = nch.mount;
984263bc
MD
3233 continue;
3234 }
28623bf9
MD
3235
3236 /*
3237 * Prepend the path segment
3238 */
2247fe02 3239 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
984263bc
MD
3240 if (bp == buf) {
3241 numfullpathfail4++;
efda3bd0 3242 kfree(buf, M_TEMP);
f63911bf
MD
3243 error = ENOMEM;
3244 goto done;
984263bc 3245 }
2247fe02 3246 *--bp = ncp->nc_name[i];
984263bc
MD
3247 }
3248 if (bp == buf) {
3249 numfullpathfail4++;
efda3bd0 3250 kfree(buf, M_TEMP);
f63911bf
MD
3251 error = ENOMEM;
3252 goto done;
984263bc
MD
3253 }
3254 *--bp = '/';
3255 slash_prefixed = 1;
28623bf9
MD
3256
3257 /*
3258 * Go up a directory. This isn't a mount point so we don't
3259 * have to check again.
f63911bf 3260 *
2247fe02 3261 * We can only safely access nc_parent with ncp held locked.
28623bf9 3262 */
2247fe02
MD
3263 while ((nch.ncp = ncp->nc_parent) != NULL) {
3264 _cache_lock(ncp);
3265 if (nch.ncp != ncp->nc_parent) {
3266 _cache_unlock(ncp);
3267 continue;
3268 }
f63911bf 3269 _cache_hold(nch.ncp);
2247fe02
MD
3270 _cache_unlock(ncp);
3271 break;
3272 }
f63911bf
MD
3273 _cache_drop(ncp);
3274 ncp = nch.ncp;
524c845c 3275 }
2247fe02 3276 if (ncp == NULL) {
524c845c 3277 numfullpathfail2++;
efda3bd0 3278 kfree(buf, M_TEMP);
f63911bf
MD
3279 error = ENOENT;
3280 goto done;
984263bc 3281 }
28623bf9 3282
984263bc
MD
3283 if (!slash_prefixed) {
3284 if (bp == buf) {
3285 numfullpathfail4++;
efda3bd0 3286 kfree(buf, M_TEMP);
f63911bf
MD
3287 error = ENOMEM;
3288 goto done;
984263bc
MD
3289 }
3290 *--bp = '/';
3291 }
3292 numfullpathfound++;
3293 *retbuf = bp;
b310dfc4 3294 *freebuf = buf;
f63911bf
MD
3295 error = 0;
3296done:
2247fe02
MD
3297 if (ncp)
3298 _cache_drop(ncp);
f63911bf 3299 return(error);
984263bc 3300}
8987aad7 3301
b6372d22 3302int
5b4cfb7e 3303vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf, int guess)
b6372d22 3304{
b6372d22 3305 struct namecache *ncp;
28623bf9 3306 struct nchandle nch;
f63911bf 3307 int error;
b6372d22 3308
f63911bf 3309 atomic_add_int(&numfullpathcalls, 1);
b6372d22
JS
3310 if (disablefullpath)
3311 return (ENODEV);
3312
3313 if (p == NULL)
3314 return (EINVAL);
3315
3316 /* vn is NULL, client wants us to use p->p_textvp */
3317 if (vn == NULL) {
3318 if ((vn = p->p_textvp) == NULL)
3319 return (EINVAL);
3320 }
287a8577 3321 spin_lock(&vn->v_spinlock);
b6372d22
JS
3322 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
3323 if (ncp->nc_nlen)
3324 break;
3325 }
f63911bf 3326 if (ncp == NULL) {
287a8577 3327 spin_unlock(&vn->v_spinlock);
b6372d22 3328 return (EINVAL);
f63911bf
MD
3329 }
3330 _cache_hold(ncp);
287a8577 3331 spin_unlock(&vn->v_spinlock);
b6372d22 3332
f63911bf 3333 atomic_add_int(&numfullpathcalls, -1);
28623bf9
MD
 3334 nch.ncp = ncp;
3335 nch.mount = vn->v_mount;
5b4cfb7e 3336 error = cache_fullpath(p, &nch, retbuf, freebuf, guess);
f63911bf
MD
3337 _cache_drop(ncp);
3338 return (error);
b6372d22 3339}
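
/*
 * Hypothetical caller sketch for vn_fullpath(): on success retbuf points
 * into the allocation returned in freebuf, and the caller must kfree()
 * freebuf (never retbuf) when finished with the path.
 */
#if 0
static void
example_vn_fullpath(struct proc *p, struct vnode *vp)
{
	char *retbuf;
	char *freebuf;

	if (vn_fullpath(p, vp, &retbuf, &freebuf, 0) == 0) {
		kprintf("path: %s\n", retbuf);
		kfree(freebuf, M_TEMP);
	}
}
#endif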