VNode sequencing and locking - part 4/4 - subpart 1 of many.
sys/kern/vfs_cache.c (dragonfly.git)
/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.75 2006/08/19 17:27:23 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key built from the parent ncp pointer and the name.
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did
 *	not have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;		/* number of negative cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;	/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;	/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
        struct globaldata *gd;
        int i, error;

        error = 0;
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
                        sizeof(struct nchstats))))
                        break;
        }

        return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
        0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't use one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
        atomic_add_int(&ncp->nc_refs, 1);
        return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
        KKASSERT(ncp->nc_refs > 0);
        if (ncp->nc_refs == 1 &&
            (ncp->nc_flag & NCF_UNRESOLVED) &&
            TAILQ_EMPTY(&ncp->nc_list)
        ) {
                KKASSERT(ncp->nc_exlocks == 0);
                cache_lock(ncp);
                cache_zap(ncp);
        } else {
                atomic_subtract_int(&ncp->nc_refs, 1);
        }
}

/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
        KKASSERT(ncp->nc_parent == NULL);
        ncp->nc_parent = par;
        if (TAILQ_EMPTY(&par->nc_list)) {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
                /*
                 * Any vp associated with an ncp which has children must
                 * be held to prevent it from being recycled.
                 */
                if (par->nc_vp)
                        vhold(par->nc_vp);
        } else {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
        }
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
        struct namecache *par;

        if ((par = ncp->nc_parent) != NULL) {
                ncp->nc_parent = NULL;
                par = cache_hold(par);
                TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                        vdrop(par->nc_vp);
                cache_drop(par);
        }
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
        struct namecache *ncp;

        ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
        if (nlen)
                ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
        ncp->nc_nlen = nlen;
        ncp->nc_flag = NCF_UNRESOLVED;
        ncp->nc_error = ENOTCONN;       /* needs to be resolved */
        ncp->nc_refs = 1;

        /*
         * Construct a fake FSMID based on the time of day and a 32 bit
         * roller for uniqueness.  This is used to generate a useful
         * FSMID for filesystems which do not support it.
         */
        ncp->nc_fsmid = cache_getnewfsmid();
        TAILQ_INIT(&ncp->nc_list);
        cache_lock(ncp);
        return(ncp);
}

static void
cache_free(struct namecache *ncp)
{
        KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
        if (ncp->nc_name)
                free(ncp->nc_name, M_VFSCACHE);
        free(ncp, M_VFSCACHE);
}

/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
        return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
        _cache_drop(ncp);
}

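/*
 * [Editorial sketch, not part of the original file] A minimal usage
 * pattern for the hold/drop pair above: the ref brackets any window
 * where the caller needs the ncp to stay allocated but does not need
 * to lock out namespace changes.  "example_peek" is a hypothetical
 * caller, not a DragonFly API.
 */
static void
example_peek(struct namecache *ncp)
{
        cache_hold(ncp);                /* ncp cannot be freed here */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                printf("ncp %p is unresolved\n", ncp);
        cache_drop(ncp);                /* may zap an unresolved leaf */
}
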
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
void
cache_lock(struct namecache *ncp)
{
        thread_t td;
        int didwarn;

        KKASSERT(ncp->nc_refs != 0);
        didwarn = 0;
        td = curthread;

        for (;;) {
                if (ncp->nc_exlocks == 0) {
                        ncp->nc_exlocks = 1;
                        ncp->nc_locktd = td;
                        /*
                         * The vp associated with a locked ncp must be held
                         * to prevent it from being recycled (which would
                         * cause the ncp to become unresolved).
                         *
                         * WARNING!  If VRECLAIMED is set the vnode could
                         * already be in the middle of a recycle.  Callers
                         * should not assume that nc_vp is usable when
                         * not NULL.  cache_vref() or cache_vget() must be
                         * called.
                         *
                         * XXX loop on race for later MPSAFE work.
                         */
                        if (ncp->nc_vp)
                                vhold(ncp->nc_vp);
                        break;
                }
                if (ncp->nc_locktd == td) {
                        ++ncp->nc_exlocks;
                        break;
                }
                ncp->nc_flag |= NCF_LOCKREQ;
                if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
                        if (didwarn)
                                continue;
                        didwarn = 1;
                        printf("[diagnostic] cache_lock: blocked on %p", ncp);
                        if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
                                printf(" [MOUNTFROM %s]\n", ncp->nc_mount->mnt_stat.f_mntfromname);
                        else
                                printf(" \"%*.*s\"\n",
                                        ncp->nc_nlen, ncp->nc_nlen,
                                        ncp->nc_name);
                }
        }

        if (didwarn == 1) {
                printf("[diagnostic] cache_lock: unblocked %*.*s\n",
                        ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
        }
}

int
cache_lock_nonblock(struct namecache *ncp)
{
        thread_t td;

        KKASSERT(ncp->nc_refs != 0);
        td = curthread;
        if (ncp->nc_exlocks == 0) {
                ncp->nc_exlocks = 1;
                ncp->nc_locktd = td;
                /*
                 * The vp associated with a locked ncp must be held
                 * to prevent it from being recycled (which would
                 * cause the ncp to become unresolved).
                 *
                 * WARNING!  If VRECLAIMED is set the vnode could
                 * already be in the middle of a recycle.  Callers
                 * should not assume that nc_vp is usable when
                 * not NULL.  cache_vref() or cache_vget() must be
                 * called.
                 *
                 * XXX loop on race for later MPSAFE work.
                 */
                if (ncp->nc_vp)
                        vhold(ncp->nc_vp);
                return(0);
        } else {
                return(EWOULDBLOCK);
        }
}

void
cache_unlock(struct namecache *ncp)
{
        thread_t td = curthread;

        KKASSERT(ncp->nc_refs > 0);
        KKASSERT(ncp->nc_exlocks > 0);
        KKASSERT(ncp->nc_locktd == td);
        if (--ncp->nc_exlocks == 0) {
                if (ncp->nc_vp)
                        vdrop(ncp->nc_vp);
                ncp->nc_locktd = NULL;
                if (ncp->nc_flag & NCF_LOCKREQ) {
                        ncp->nc_flag &= ~NCF_LOCKREQ;
                        wakeup(ncp);
                }
        }
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * cache_get() is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
        _cache_hold(ncp);
        cache_lock(ncp);
        if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                cache_setunresolved(ncp);
        return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
        /* XXX MP */
        if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
                _cache_hold(ncp);
                cache_lock(ncp);
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        cache_setunresolved(ncp);
                return(0);
        }
        return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
        cache_unlock(ncp);
        _cache_drop(ncp);
}

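/*
 * [Editorial sketch, not in the original file] cache_get()/cache_put()
 * bracket an operation that needs either a definitively usable vnode or
 * a definitively unresolved ncp, e.g. a resolution attempt.  The
 * function name is hypothetical.
 */
static int
example_resolve(struct namecache *ncp, struct ucred *cred)
{
        int error;

        cache_get(ncp);                 /* ref + lock, revalidates nc_vp */
        error = cache_resolve(ncp, cred);
        cache_put(ncp);                 /* unlock + drop */
        return (error);
}
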
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
        KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
        ncp->nc_vp = vp;
        if (vp != NULL) {
                /*
                 * Any vp associated with an ncp which has children must
                 * be held.  Any vp associated with a locked ncp must be held.
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        vhold(vp);
                TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
                if (ncp->nc_exlocks)
                        vhold(vp);

                /*
                 * Set auxiliary flags
                 */
                switch(vp->v_type) {
                case VDIR:
                        ncp->nc_flag |= NCF_ISDIR;
                        break;
                case VLNK:
                        ncp->nc_flag |= NCF_ISSYMLINK;
                        /* XXX cache the contents of the symlink */
                        break;
                default:
                        break;
                }
                ++numcache;
                ncp->nc_error = 0;
        } else {
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                ++numneg;
                ncp->nc_error = ENOENT;
        }
        ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_settimeout(struct namecache *ncp, int nticks)
{
        if ((ncp->nc_timeout = ticks + nticks) == 0)
                ncp->nc_timeout = 1;
}

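/*
 * [Editorial sketch, not in the original file] Resolver-side usage: a
 * filesystem's resolve code, holding a locked unresolved ncp, passes a
 * vnode to cache_setvp() on success or NULL to create a negative entry,
 * optionally bounding the entry's lifetime with cache_settimeout().
 * The function name is hypothetical.
 */
static void
example_resolve_miss(struct namecache *ncp)
{
        cache_setvp(ncp, NULL);         /* negative hit, nc_error = ENOENT */
        cache_settimeout(ncp, hz * 5);  /* force a re-lookup in ~5 seconds */
}
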
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
void
cache_setunresolved(struct namecache *ncp)
{
        struct vnode *vp;

        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                ncp->nc_flag |= NCF_UNRESOLVED;
                ncp->nc_timeout = 0;
                ncp->nc_error = ENOTCONN;
                ++numunres;
                if ((vp = ncp->nc_vp) != NULL) {
                        --numcache;
                        ncp->nc_vp = NULL;
                        TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

                        /*
                         * Any vp associated with an ncp with children is
                         * held by that ncp.  Any vp associated with a locked
                         * ncp is held by that ncp.  These conditions must be
                         * undone when the vp is cleared out from the ncp.
                         */
                        if (ncp->nc_flag & NCF_FSMID)
                                vupdatefsmid(vp);
                        if (!TAILQ_EMPTY(&ncp->nc_list))
                                vdrop(vp);
                        if (ncp->nc_exlocks)
                                vdrop(vp);
                } else {
                        TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                        --numneg;
                }
                ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
                                  NCF_FSMID);
        }
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 */
int
cache_inval(struct namecache *ncp, int flags)
{
        struct namecache *kid;
        struct namecache *nextkid;
        int rcnt = 0;

        KKASSERT(ncp->nc_exlocks);

        cache_setunresolved(ncp);
        if (flags & CINV_DESTROY)
                ncp->nc_flag |= NCF_DESTROYED;

        if ((flags & CINV_CHILDREN) &&
            (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
        ) {
                cache_hold(kid);
                cache_unlock(ncp);
                while (kid) {
                        if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
                                cache_hold(nextkid);
                        if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
                            TAILQ_FIRST(&kid->nc_list)
                        ) {
                                cache_lock(kid);
                                rcnt += cache_inval(kid, flags & ~CINV_DESTROY);
                                cache_unlock(kid);
                        }
                        cache_drop(kid);
                        kid = nextkid;
                }
                cache_lock(ncp);
        }

        /*
         * Someone could have gotten in there while ncp was unlocked,
         * retry if so.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
                ++rcnt;
        return (rcnt);
}

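/*
 * [Editorial sketch, not in the original file] An rmdir-style caller,
 * holding the removed directory's ncp locked, would flag the node
 * destroyed and recursively unresolve anything cached under it,
 * retrying if a concurrent resolve revalidated the subtree (the same
 * retry pattern cache_rename() uses below).  The function name is
 * hypothetical.
 */
static void
example_rmdir_inval(struct namecache *ncp)
{
        while (cache_inval(ncp, CINV_DESTROY | CINV_CHILDREN) != 0)
                tsleep(ncp, 0, "exinval", hz / 10);
}
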
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology, which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
        struct namecache *ncp;
        struct namecache *next;

restart:
        ncp = TAILQ_FIRST(&vp->v_namecache);
        if (ncp)
                cache_hold(ncp);
        while (ncp) {
                /* loop entered with ncp held */
                if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
                        cache_hold(next);
                cache_lock(ncp);
                if (ncp->nc_vp != vp) {
                        printf("Warning: cache_inval_vp: race-A detected on "
                                "%s\n", ncp->nc_name);
                        cache_put(ncp);
                        if (next)
                                cache_drop(next);
                        goto restart;
                }
                cache_inval(ncp, flags);
                cache_put(ncp);         /* also releases reference */
                ncp = next;
                if (ncp && ncp->nc_vp != vp) {
                        printf("Warning: cache_inval_vp: race-B detected on "
                                "%s\n", ncp->nc_name);
                        cache_drop(ncp);
                        goto restart;
                }
        }
        return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target are assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
        struct namecache *scan;
        int didwarn = 0;

        cache_setunresolved(fncp);
        cache_setunresolved(tncp);
        while (cache_inval(tncp, CINV_CHILDREN) != 0) {
                if (didwarn++ % 10 == 0) {
                        printf("Warning: cache_rename: race during "
                                "rename %s->%s\n",
                                fncp->nc_name, tncp->nc_name);
                }
                tsleep(tncp, 0, "mvrace", hz / 10);
                cache_setunresolved(tncp);
        }
        while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
                cache_hold(scan);
                cache_unlink_parent(scan);
                cache_link_parent(scan, tncp);
                if (scan->nc_flag & NCF_HASHED)
                        cache_rehash(scan);
                cache_drop(scan);
        }
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED or LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
           int lk_type, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_lock(ncp);
                error = cache_resolve(ncp, cred);
                cache_unlock(ncp);
        } else {
                error = 0;
        }
        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                /*
                 * Accessing the vnode from the namecache is a bit
                 * dangerous.  Because there are no refs on the vnode, it
                 * could be in the middle of a reclaim.
                 */
                if (vp->v_flag & VRECLAIMED) {
                        printf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
                        cache_lock(ncp);
                        cache_setunresolved(ncp);
                        cache_unlock(ncp);
                        goto again;
                }
                error = vget(vp, lk_type);
                if (error) {
                        if (vp != ncp->nc_vp)
                                goto again;
                        vp = NULL;
                } else if (vp != ncp->nc_vp) {
                        vput(vp);
                        goto again;
                } else if (vp->v_flag & VRECLAIMED) {
                        panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
                }
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}

int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_lock(ncp);
                error = cache_resolve(ncp, cred);
                cache_unlock(ncp);
        } else {
                error = 0;
        }
        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                /*
                 * Since we did not obtain any locks, a cache zap
                 * race can occur here if the vnode is in the middle
                 * of being reclaimed and has not yet been able to
                 * clean out its cache node.  If that case occurs,
                 * we must lock and unresolve the cache, then loop
                 * to retry.
                 */
                if (vp->v_flag & VRECLAIMED) {
                        printf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
                        cache_lock(ncp);
                        cache_setunresolved(ncp);
                        cache_unlock(ncp);
                        goto again;
                }
                vref_initial(vp, 1);
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}

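/*
 * [Editorial sketch, not in the original file] Fetching a locked vnode
 * through a referenced ncp.  ENOENT signals a negative cache hit rather
 * than a failure of the lookup machinery.  The function name is
 * hypothetical.
 */
static int
example_get_vnode(struct namecache *ncp, struct ucred *cred)
{
        struct vnode *vp;
        int error;

        error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
        if (error == 0)
                vput(vp);       /* sketch: release the lock + ref at once */
        return (error);
}
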
/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct namecache *ncp)
{
        struct vnode *vp;
        struct namecache *scan;

        /*
         * Warning: even if we get a non-NULL vp it could still be in the
         * middle of a recyclement.  Don't do anything fancy, just set
         * NCF_FSMID.
         */
        if ((vp = ncp->nc_vp) != NULL) {
                TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                        for (scan = ncp; scan; scan = scan->nc_parent) {
                                if (scan->nc_flag & NCF_FSMID)
                                        break;
                                scan->nc_flag |= NCF_FSMID;
                        }
                }
        } else {
                while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
                        ncp->nc_flag |= NCF_FSMID;
                        ncp = ncp->nc_parent;
                }
        }
}

void
cache_update_fsmid_vp(struct vnode *vp)
{
        struct namecache *ncp;
        struct namecache *scan;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                for (scan = ncp; scan; scan = scan->nc_parent) {
                        if (scan->nc_flag & NCF_FSMID)
                                break;
                        scan->nc_flag |= NCF_FSMID;
                }
        }
}

/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
        struct namecache *ncp;
        int changed = 0;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (ncp->nc_flag & NCF_FSMID) {
                        ncp->nc_flag &= ~NCF_FSMID;
                        changed = 1;
                }
        }
        if (*fsmid == 0)
                ++*fsmid;
        if (changed)
                ++*fsmid;
        return(changed);
}

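/*
 * [Editorial sketch, not in the original file] A getattr path for a
 * filesystem without native FSMID support might keep its own counter
 * and let the namecache bump it when the hierarchy changed.  The
 * function and the static counter are hypothetical.
 */
static void
example_getattr_fsmid(struct vnode *vp)
{
        static int64_t my_fsmid;        /* hypothetical per-fs storage */

        if (cache_check_fsmid_vp(vp, &my_fsmid))
                printf("hierarchy below vp %p changed, fsmid %lld\n",
                        vp, (long long)my_fsmid);
}
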
/*
 * Obtain the FSMID for a vnode for filesystems which do not support
 * a built-in FSMID.
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
        struct namecache *ncp;

        if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
                if (ncp->nc_flag & NCF_FSMID) {
                        ncp->nc_flag &= ~NCF_FSMID;
                        ++ncp->nc_fsmid;
                }
                return(ncp->nc_fsmid);
        }
        return(VNOVAL);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
                                  struct vnode *dvp);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
                             struct vnode **saved_dvp);

struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
        struct namecache *ncp;
        struct vnode *saved_dvp;
        struct vnode *pvp;
        int error;

        ncp = NULL;
        saved_dvp = NULL;

        /*
         * Temporary debugging code to force the directory scanning code
         * to be exercised.
         */
        if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
                ncp = TAILQ_FIRST(&dvp->v_namecache);
                printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
                goto force;
        }

        /*
         * Loop until resolution, inside code will break out on error.
         */
        while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
                /*
                 * If dvp is the root of its filesystem it should already
                 * have a namecache pointer associated with it as a side
                 * effect of the mount, but it may have been disassociated.
                 */
                if (dvp->v_flag & VROOT) {
                        ncp = cache_get(dvp->v_mount->mnt_ncp);
                        error = cache_resolve_mp(ncp);
                        cache_put(ncp);
                        if (ncvp_debug) {
                                printf("cache_fromdvp: resolve root of mount %p error %d",
                                        dvp->v_mount, error);
                        }
                        if (error) {
                                if (ncvp_debug)
                                        printf(" failed\n");
                                ncp = NULL;
                                break;
                        }
                        if (ncvp_debug)
                                printf(" succeeded\n");
                        continue;
                }

                /*
                 * If we are recursed too deeply resort to an O(n^2)
                 * algorithm to resolve the namecache topology.  The
                 * resolved pvp is left referenced in saved_dvp to
                 * prevent the tree from being destroyed while we loop.
                 */
                if (makeit > 20) {
                        error = cache_fromdvp_try(dvp, cred, &saved_dvp);
                        if (error) {
                                printf("lookupdotdot(longpath) failed %d "
                                        "dvp %p\n", error, dvp);
                                break;
                        }
                        continue;
                }

                /*
                 * Get the parent directory and resolve its ncp.
                 */
                error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
                if (error) {
                        printf("lookupdotdot failed %d dvp %p\n", error, dvp);
                        break;
                }
                vn_unlock(pvp);

                /*
                 * Reuse makeit as a recursion depth counter.
                 */
                ncp = cache_fromdvp(pvp, cred, makeit + 1);
                vrele(pvp);
                if (ncp == NULL)
                        break;

                /*
                 * Do an inefficient scan of pvp (embodied by ncp) to look
                 * for dvp.  This will create a namecache record for dvp on
                 * success.  We loop up to recheck on success.
                 *
                 * ncp and dvp are both held but not locked.
                 */
                error = cache_inefficient_scan(ncp, cred, dvp);
                cache_drop(ncp);
                if (error) {
                        printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
                                pvp, ncp->nc_name, dvp);
                        ncp = NULL;
                        break;
                }
                if (ncvp_debug) {
                        printf("cache_fromdvp: scan %p (%s) succeeded\n",
                                pvp, ncp->nc_name);
                }
        }
        if (ncp)
                cache_hold(ncp);
        if (saved_dvp)
                vrele(saved_dvp);
        return (ncp);
}

/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
                  struct vnode **saved_dvp)
{
        struct namecache *ncp;
        struct vnode *pvp;
        int error;
        static time_t last_fromdvp_report;

        /*
         * Loop getting the parent directory vnode until we get something we
         * can resolve in the namecache.
         */
        vref(dvp);
        for (;;) {
                error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
                if (error) {
                        vrele(dvp);
                        return (error);
                }
                vn_unlock(pvp);
                if ((ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
                        cache_hold(ncp);
                        vrele(pvp);
                        break;
                }
                if (pvp->v_flag & VROOT) {
                        ncp = cache_get(pvp->v_mount->mnt_ncp);
                        error = cache_resolve_mp(ncp);
                        cache_unlock(ncp);
                        vrele(pvp);
                        if (error) {
                                cache_drop(ncp);
                                vrele(dvp);
                                return (error);
                        }
                        break;
                }
                vrele(dvp);
                dvp = pvp;
        }
        if (last_fromdvp_report != time_second) {
                last_fromdvp_report = time_second;
                printf("Warning: extremely inefficient path resolution on %s\n",
                        ncp->nc_name);
        }
        error = cache_inefficient_scan(ncp, cred, dvp);

        /*
         * Hopefully dvp now has a namecache record associated with it.
         * Leave it referenced to prevent the kernel from recycling the
         * vnode.  Otherwise extremely long directory paths could result
         * in endless recycling.
         */
        if (*saved_dvp)
                vrele(*saved_dvp);
        *saved_dvp = dvp;
        return (error);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
                       struct vnode *dvp)
{
        struct nlcomponent nlc;
        struct namecache *rncp;
        struct dirent *den;
        struct vnode *pvp;
        struct vattr vat;
        struct iovec iov;
        struct uio uio;
        int blksize;
        int eofflag;
        int bytes;
        char *rbuf;
        int error;

        vat.va_blocksize = 0;
        if ((error = VOP_GETATTR(dvp, &vat)) != 0)
                return (error);
        if ((error = cache_vref(ncp, cred, &pvp)) != 0)
                return (error);
        if (ncvp_debug)
                printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
        if ((blksize = vat.va_blocksize) == 0)
                blksize = DEV_BSIZE;
        rbuf = malloc(blksize, M_TEMP, M_WAITOK);
        rncp = NULL;

        eofflag = 0;
        uio.uio_offset = 0;
again:
        iov.iov_base = rbuf;
        iov.iov_len = blksize;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_resid = blksize;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = curthread;

        if (ncvp_debug >= 2)
                printf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
        error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
        if (error == 0) {
                den = (struct dirent *)rbuf;
                bytes = blksize - uio.uio_resid;

                while (bytes > 0) {
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: %*.*s\n",
                                        den->d_namlen, den->d_namlen,
                                        den->d_name);
                        }
                        if (den->d_type != DT_WHT &&
                            den->d_ino == vat.va_fileid) {
                                if (ncvp_debug) {
                                        printf("cache_inefficient_scan: "
                                               "MATCHED inode %ld path %s/%*.*s\n",
                                               vat.va_fileid, ncp->nc_name,
                                               den->d_namlen, den->d_namlen,
                                               den->d_name);
                                }
                                nlc.nlc_nameptr = den->d_name;
                                nlc.nlc_namelen = den->d_namlen;
                                rncp = cache_nlookup(ncp, &nlc);
                                KKASSERT(rncp != NULL);
                                break;
                        }
                        bytes -= _DIRENT_DIRSIZ(den);
                        den = _DIRENT_NEXT(den);
                }
                if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
                        goto again;
        }
        vrele(pvp);
        if (rncp) {
                if (rncp->nc_flag & NCF_UNRESOLVED) {
                        cache_setvp(rncp, dvp);
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: setvp %s/%s = %p\n",
                                        ncp->nc_name, rncp->nc_name, dvp);
                        }
                } else {
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
                                        ncp->nc_name, rncp->nc_name, dvp,
                                        rncp->nc_vp);
                        }
                }
                if (rncp->nc_vp == NULL)
                        error = rncp->nc_error;
                cache_put(rncp);
        } else {
                printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
                        dvp, ncp->nc_name);
                error = ENOENT;
        }
        free(rbuf, M_TEMP);
        return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
        struct namecache *par;

        /*
         * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
         */
        cache_setunresolved(ncp);

        /*
         * Try to scrap the entry and possibly tail-recurse on its parent.
         * We only scrap unref'd (other than our ref) unresolved entries,
         * we do not scrap 'live' entries.
         */
        while (ncp->nc_flag & NCF_UNRESOLVED) {
                /*
                 * Someone other than us has a ref, stop.
                 */
                if (ncp->nc_refs > 1)
                        goto done;

                /*
                 * We have children, stop.
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        goto done;

                /*
                 * Remove ncp from the topology: hash table and parent linkage.
                 */
                if (ncp->nc_flag & NCF_HASHED) {
                        ncp->nc_flag &= ~NCF_HASHED;
                        LIST_REMOVE(ncp, nc_hash);
                }
                if ((par = ncp->nc_parent) != NULL) {
                        par = cache_hold(par);
                        TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                        ncp->nc_parent = NULL;
                        if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                                vdrop(par->nc_vp);
                }

                /*
                 * ncp should not have picked up any refs.  Physically
                 * destroy the ncp.
                 */
                KKASSERT(ncp->nc_refs == 1);
                --numunres;
                /* cache_unlock(ncp) not required */
                ncp->nc_refs = -1;      /* safety */
                if (ncp->nc_name)
                        free(ncp->nc_name, M_VFSCACHE);
                free(ncp, M_VFSCACHE);

                /*
                 * Loop on the parent (it may be NULL).  Only bother looping
                 * if the parent has a single ref (ours), which also means
                 * we can lock it trivially.
                 */
                ncp = par;
                if (ncp == NULL)
                        return;
                if (ncp->nc_refs != 1) {
                        cache_drop(ncp);
                        return;
                }
                KKASSERT(par->nc_exlocks == 0);
                cache_lock(ncp);
        }
done:
        cache_unlock(ncp);
        atomic_subtract_int(&ncp->nc_refs, 1);
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
        /*
         * Don't cache too many negative hits.  We use hysteresis to reduce
         * the impact on the critical path.
         */
        switch(cache_hysteresis_state) {
        case CHI_LOW:
                if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
                        cache_cleanneg(10);
                        cache_hysteresis_state = CHI_HIGH;
                }
                break;
        case CHI_HIGH:
                if (numneg > MINNEG * 9 / 10 &&
                    numneg * ncnegfactor * 9 / 10 > numcache
                ) {
                        cache_cleanneg(10);
                } else {
                        cache_hysteresis_state = CHI_LOW;
                }
                break;
        }
}

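/*
 * [Editorial note, not in the original file] Worked numbers: with the
 * defaults MINNEG = 1024 and ncnegfactor = 16, a cache holding
 * numcache = 60000 entries lets numneg grow past 60000/16 = 3750
 * before the CHI_LOW case fires cache_cleanneg(10) and switches to
 * CHI_HIGH; the 9/10 factors in the CHI_HIGH test keep the two states
 * from toggling on every crossing of the threshold.
 */
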
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
        struct namecache *ncp;
        struct namecache *new_ncp;
        struct nchashhead *nchpp;
        u_int32_t hash;
        globaldata_t gd;

        numcalls++;
        gd = mycpu;

        /*
         * Try to locate an existing entry
         */
        hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&par, sizeof(par), hash);
        new_ncp = NULL;
restart:
        LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
                numchecks++;

                /*
                 * Zap entries that have timed out.
                 */
                if (ncp->nc_timeout &&
                    (int)(ncp->nc_timeout - ticks) < 0 &&
                    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
                    ncp->nc_exlocks == 0
                ) {
                        cache_zap(cache_get(ncp));
                        goto restart;
                }

                /*
                 * Break out if we find a matching entry.  Note that
                 * UNRESOLVED entries may match, but DESTROYED entries
                 * do not.
                 */
                if (ncp->nc_parent == par &&
                    ncp->nc_nlen == nlc->nlc_namelen &&
                    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        if (cache_get_nonblock(ncp) == 0) {
                                if (new_ncp)
                                        cache_free(new_ncp);
                                goto found;
                        }
                        cache_get(ncp);
                        cache_put(ncp);
                        goto restart;
                }
        }

        /*
         * We failed to locate an entry, create a new entry and add it to
         * the cache.  We have to relookup after possibly blocking in
         * malloc.
         */
        if (new_ncp == NULL) {
                new_ncp = cache_alloc(nlc->nlc_namelen);
                goto restart;
        }

        ncp = new_ncp;

        /*
         * Initialize as a new UNRESOLVED entry, lock (non-blocking),
         * and link to the parent.  The mount point is usually inherited
         * from the parent unless this is a special case such as a mount
         * point where nlc_namelen is 0.  The caller is responsible for
         * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
         * be NULL.
         */
        if (nlc->nlc_namelen) {
                bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
                ncp->nc_name[nlc->nlc_namelen] = 0;
                ncp->nc_mount = par->nc_mount;
        }
        nchpp = NCHHASH(hash);
        LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
        ncp->nc_flag |= NCF_HASHED;
        cache_link_parent(ncp, par);
found:
        /*
         * stats and namecache size management
         */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                ++gd->gd_nchstats->ncs_miss;
        else if (ncp->nc_vp)
                ++gd->gd_nchstats->ncs_goodhits;
        else
                ++gd->gd_nchstats->ncs_neghits;
        cache_hysteresis();
        return(ncp);
}

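/*
 * [Editorial sketch, not in the original file] A complete lookup of the
 * component "foo" under a held directory ncp: cache_nlookup() always
 * returns a locked, referenced entry, which is then resolved and
 * returned to the system with cache_put().  The function name is
 * hypothetical.
 */
static int
example_lookup(struct namecache *par, struct ucred *cred)
{
        struct nlcomponent nlc;
        struct namecache *ncp;
        int error;

        nlc.nlc_nameptr = "foo";
        nlc.nlc_namelen = 3;
        ncp = cache_nlookup(par, &nlc);
        error = cache_resolve(ncp, cred);       /* ENOENT on a negative hit */
        cache_put(ncp);
        return (error);
}
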
/*
 * Given a locked ncp, validate that the vnode, if present, is actually
 * usable.  If it is not usable set the ncp to an unresolved state.
 */
void
cache_validate(struct namecache *ncp)
{
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        cache_setunresolved(ncp);
        }
}

690a3127 1585/*
21739618 1586 * Resolve an unresolved namecache entry, generally by looking it up.
67773eb3 1587 * The passed ncp must be locked and refd.
21739618
MD
1588 *
1589 * Theoretically since a vnode cannot be recycled while held, and since
1590 * the nc_parent chain holds its vnode as long as children exist, the
1591 * direct parent of the cache entry we are trying to resolve should
1592 * have a valid vnode. If not then generate an error that we can
1593 * determine is related to a resolver bug.
fad57d0e 1594 *
9b1b3591
MD
1595 * However, if a vnode was in the middle of a recyclement when the NCP
1596 * got locked, ncp->nc_vp might point to a vnode that is about to become
1597 * invalid. cache_resolve() handles this case by unresolving the entry
1598 * and then re-resolving it.
1599 *
fad57d0e
MD
1600 * Note that successful resolution does not necessarily return an error
1601 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
1602 * will be returned.
690a3127
MD
1603 */
1604int
21739618 1605cache_resolve(struct namecache *ncp, struct ucred *cred)
690a3127 1606{
21739618 1607 struct namecache *par;
67773eb3 1608 int error;
8e005a45 1609
67773eb3 1610restart:
8e005a45 1611 /*
9b1b3591
MD
1612 * If the ncp is already resolved we have nothing to do. However,
1613 * we do want to guarentee that a usable vnode is returned when
1614 * a vnode is present, so make sure it hasn't been reclaimed.
8e005a45 1615 */
9b1b3591
MD
1616 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1617 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1618 cache_setunresolved(ncp);
1619 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1620 return (ncp->nc_error);
1621 }
21739618 1622
646a1cda
MD
1623 /*
1624 * Mount points need special handling because the parent does not
1625 * belong to the same filesystem as the ncp.
1626 */
8e005a45 1627 if (ncp->nc_flag & NCF_MOUNTPT)
646a1cda 1628 return (cache_resolve_mp(ncp));
646a1cda
MD
1629
1630 /*
1631 * We expect an unbroken chain of ncps to at least the mount point,
1632 * and even all the way to root (but this code doesn't have to go
1633 * past the mount point).
1634 */
1635 if (ncp->nc_parent == NULL) {
8e005a45 1636 printf("EXDEV case 1 %p %*.*s\n", ncp,
646a1cda 1637 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
21739618 1638 ncp->nc_error = EXDEV;
646a1cda
MD
1639 return(ncp->nc_error);
1640 }
1641
1642 /*
1643 * The vp's of the parent directories in the chain are held via vhold()
1644 * due to the existence of the child, and should not disappear.
1645 * However, there are cases where they can disappear:
1646 *
1647 * - due to filesystem I/O errors.
1648 * - due to NFS being stupid about tracking the namespace and
1649 * destroying the namespace for entire directories quite often.
1650 * - due to forced unmounts.
e09206ba 1651 * - due to an rmdir (parent will be marked DESTROYED)
646a1cda
MD
1652 *
1653 * When this occurs we have to track the chain backwards and resolve
1654 * it, looping until the resolver catches up to the current node. We
1655 * could recurse here but we might run ourselves out of kernel stack
1656 * so we do it in a more painful manner. This situation really should
1657 * not occur all that often, and when it does we should not have to go
1658 * back too many nodes to resolve the ncp.
1659 */
1660 while (ncp->nc_parent->nc_vp == NULL) {
e09206ba
MD
1661 /*
1662 * This case can occur if a process is CD'd into a
1663 * directory which is then rmdir'd. If the parent is marked
1664 * destroyed, there is no point trying to resolve it.
1665 */
1666 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
1667 return(ENOENT);
1668
646a1cda
MD
1669 par = ncp->nc_parent;
1670 while (par->nc_parent && par->nc_parent->nc_vp == NULL)
1671 par = par->nc_parent;
1672 if (par->nc_parent == NULL) {
1673 printf("EXDEV case 2 %*.*s\n",
1674 par->nc_nlen, par->nc_nlen, par->nc_name);
1675 return (EXDEV);
1676 }
1677 printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
1678 par->nc_nlen, par->nc_nlen, par->nc_name);
1679 /*
67773eb3
MD
1680 * The parent is not set in stone, so ref and lock it to prevent
1681 * it from disappearing. Also note that due to renames it
1682 * is possible for our ncp to move and for par to no longer
1683 * be one of its ancestors. We resolve it anyway; the loop
1684 * will handle any moves.
646a1cda
MD
1685 */
1686 cache_get(par);
1687 if (par->nc_flag & NCF_MOUNTPT) {
1688 cache_resolve_mp(par);
8e005a45
MD
1689 } else if (par->nc_parent->nc_vp == NULL) {
1690 printf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
1691 cache_put(par);
1692 continue;
fad57d0e
MD
1693 } else if (par->nc_flag & NCF_UNRESOLVED) {
1694 par->nc_error = VOP_NRESOLVE(par, cred);
646a1cda 1695 }
67773eb3
MD
1696 if ((error = par->nc_error) != 0) {
1697 if (par->nc_error != EAGAIN) {
1698 printf("EXDEV case 3 %*.*s error %d\n",
1699 par->nc_nlen, par->nc_nlen, par->nc_name,
1700 par->nc_error);
1701 cache_put(par);
1702 return(error);
1703 }
1704 printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
1705 par, par->nc_nlen, par->nc_nlen, par->nc_name);
646a1cda 1706 }
67773eb3
MD
1707 cache_put(par);
1708 /* loop */
646a1cda 1709 }
8e005a45
MD
1710
1711 /*
fad57d0e 1712 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
8e005a45
MD
1713 * ncp's and reattach them. If this occurs, the original ncp is marked
1714 * EAGAIN to force a relookup.
fad57d0e
MD
1715 *
1716 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
1717 * ncp must already be resolved.
8e005a45
MD
1718 */
1719 KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
fad57d0e 1720 ncp->nc_error = VOP_NRESOLVE(ncp, cred);
6ddb7618 1721 /*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
67773eb3
MD
1722 if (ncp->nc_error == EAGAIN) {
1723 printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
1724 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
1725 goto restart;
1726 }
646a1cda
MD
1727 return(ncp->nc_error);
1728}
1729
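/*
 * Caller pattern sketch (hedged; follows the semantics documented
 * above): ENOENT from cache_resolve() signals a successful *negative*
 * resolution and must be distinguished from hard resolver errors.
 */
#if 0
	error = cache_resolve(ncp, cred);
	if (error == 0)
		vp = ncp->nc_vp;	/* positive hit, usable vnode */
	else if (error == ENOENT)
		vp = NULL;		/* negative hit, cached non-existence */
	else
		return (error);		/* hard failure, e.g. EXDEV above */
#endif
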
1730/*
1731 * Resolve the ncp associated with a mount point. Such ncp's almost always
1732 * remain resolved and this routine is rarely called. NFS MPs tend to force
1733 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
1734 * method of tracking namespace changes.
1735 *
6215aa92
MD
1736 * The semantics for this call are that the passed ncp must be locked on
1737 * entry and will be locked on return. However, if we actually have to
1738 * resolve the mount point we temporarily unlock the entry in order to
1739 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
1740 * the unlock we have to recheck the flags after we relock.
646a1cda
MD
1741 */
1742static int
1743cache_resolve_mp(struct namecache *ncp)
1744{
1745 struct vnode *vp;
1746 struct mount *mp = ncp->nc_mount;
6215aa92 1747 int error;
646a1cda
MD
1748
1749 KKASSERT(mp != NULL);
9b1b3591
MD
1750
1751 /*
1752 * If the ncp is already resolved we have nothing to do. However,
1753 * we do want to guarantee that a usable vnode is returned when
1754 * a vnode is present, so make sure it hasn't been reclaimed.
1755 */
1756 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1757 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1758 cache_setunresolved(ncp);
1759 }
1760
646a1cda 1761 if (ncp->nc_flag & NCF_UNRESOLVED) {
6215aa92 1762 cache_unlock(ncp);
f9642f56 1763 while (vfs_busy(mp, 0))
646a1cda 1764 ;
6215aa92
MD
1765 error = VFS_ROOT(mp, &vp);
1766 cache_lock(ncp);
1767
1768 /*
1769 * recheck the ncp state after relocking.
1770 */
1771 if (ncp->nc_flag & NCF_UNRESOLVED) {
1772 ncp->nc_error = error;
1773 if (error == 0) {
1774 cache_setvp(ncp, vp);
1775 vput(vp);
1776 } else {
1777 printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
1778 cache_setvp(ncp, NULL);
1779 }
1780 } else if (error == 0) {
646a1cda 1781 vput(vp);
646a1cda 1782 }
f9642f56 1783 vfs_unbusy(mp);
21739618
MD
1784 }
1785 return(ncp->nc_error);
14c92d03
MD
1786}
1787
62d0f1f0
MD
1788void
1789cache_cleanneg(int count)
1790{
1791 struct namecache *ncp;
7ea21ed1
MD
1792
1793 /*
62d0f1f0
MD
1794 * Automode from the vnlru proc - clean out 10% of the negative cache
1795 * entries.
7ea21ed1 1796 */
62d0f1f0
MD
1797 if (count == 0)
1798 count = numneg / 10 + 1;
1799
1800 /*
1801 * Attempt to clean out the specified number of negative cache
1802 * entries.
1803 */
1804 while (count) {
7ea21ed1 1805 ncp = TAILQ_FIRST(&ncneglist);
eb82ae62
MD
1806 if (ncp == NULL) {
1807 KKASSERT(numneg == 0);
1808 break;
1809 }
62d0f1f0
MD
1810 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
1811 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
67773eb3
MD
1812 if (cache_get_nonblock(ncp) == 0)
1813 cache_zap(ncp);
62d0f1f0 1814 --count;
984263bc
MD
1815 }
1816}
1817
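/*
 * Usage sketch (hedged: the actual call sites are the vnlru helper and
 * cache_hysteresis(), not shown here).  A count of 0 selects the 10%
 * automode computed above; a positive count trims exactly that many.
 */
#if 0
	cache_cleanneg(0);	/* automode: reap numneg / 10 + 1 entries */
	cache_cleanneg(32);	/* explicit: reap up to 32 negative entries */
#endif
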
fad57d0e
MD
1818/*
1819 * Rehash an ncp. Rehashing is typically required if the name changes
1820 * (which should not generally occur) or the parent link changes. This
1821 * function will unhash the ncp if the ncp is no longer hashable.
1822 */
8c361dda
MD
1823static void
1824cache_rehash(struct namecache *ncp)
1825{
1826 struct nchashhead *nchpp;
1827 u_int32_t hash;
1828
1829 if (ncp->nc_flag & NCF_HASHED) {
1830 ncp->nc_flag &= ~NCF_HASHED;
1831 LIST_REMOVE(ncp, nc_hash);
1832 }
fad57d0e
MD
1833 if (ncp->nc_nlen && ncp->nc_parent) {
1834 hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
1835 hash = fnv_32_buf(&ncp->nc_parent,
1836 sizeof(ncp->nc_parent), hash);
1837 nchpp = NCHHASH(hash);
1838 LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
1839 ncp->nc_flag |= NCF_HASHED;
1840 }
8c361dda
MD
1841}
1842
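/*
 * Sketch of the rename path that makes rehashing necessary (hedged:
 * helper names follow the conventions used earlier in this file;
 * cache_unlink_parent() is assumed here, this is not verbatim rename
 * code).  The hash mixes nc_name and nc_parent, so changing either
 * one invalidates the current bucket.
 */
#if 0
	cache_unlink_parent(ncp);	/* detach from the old parent */
	cache_link_parent(ncp, tpar);	/* attach under the target parent */
	cache_rehash(ncp);		/* re-bucket on the new (name, parent) */
#endif
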
984263bc 1843/*
24e51f36 1844 * Name cache initialization, from vfsinit() when we are booting
984263bc
MD
1845 */
1846void
8987aad7 1847nchinit(void)
984263bc 1848{
24e51f36
HP
1849 int i;
1850 globaldata_t gd;
1851
1852 /* initialize per-cpu namecache effectiveness statistics. */
1853 for (i = 0; i < ncpus; ++i) {
1854 gd = globaldata_find(i);
1855 gd->gd_nchstats = &nchstats[i];
1856 }
7ea21ed1 1857 TAILQ_INIT(&ncneglist);
984263bc 1858 nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
fc21741a 1859 nclockwarn = 1 * hz;
21739618
MD
1860}
1861
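/*
 * Sketch of how lookups key into the table initialized above (hedged;
 * the FNV-1 mixing mirrors cache_rehash() below): hashinit() sizes the
 * table and stores the size-1 mask in nchash, which NCHHASH() applies
 * to select a bucket.
 */
#if 0
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	nchpp = NCHHASH(hash);		/* effectively &nchashtbl[hash & nchash] */
#endif
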
1862/*
1863 * Called from start_init() to bootstrap the root filesystem. Returns
1864 * a referenced, unlocked namecache record.
1865 */
1866struct namecache *
5fd012e0 1867cache_allocroot(struct mount *mp, struct vnode *vp)
21739618 1868{
524c845c 1869 struct namecache *ncp = cache_alloc(0);
21739618 1870
21739618 1871 ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
5fd012e0 1872 ncp->nc_mount = mp;
8c361dda
MD
1873 cache_setvp(ncp, vp);
1874 return(ncp);
984263bc
MD
1875}
1876
1877/*
7ea21ed1 1878 * vfs_cache_setroot()
984263bc 1879 *
7ea21ed1
MD
1880 * Create an association between the root of our namecache and
1881 * the root vnode. This routine may be called several times during
1882 * booting.
690a3127
MD
1883 *
1884 * If the caller intends to save the passed namecache pointer somewhere
1885 * it must cache_hold() it.
7ea21ed1 1886 */
21739618
MD
1887void
1888vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
7ea21ed1 1889{
21739618
MD
1890 struct vnode *ovp;
1891 struct namecache *oncp;
1892
1893 ovp = rootvnode;
1894 oncp = rootncp;
1895 rootvnode = nvp;
1896 rootncp = ncp;
1897
1898 if (ovp)
1899 vrele(ovp);
1900 if (oncp)
1901 cache_drop(oncp);
7ea21ed1
MD
1902}
1903
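/*
 * Boot-time sketch (hedged: the real call site lives in the mountroot
 * code, and vnode lock handling is elided).  VFS_ROOT() returns a
 * referenced vnode; that reference and the one from cache_allocroot()
 * are the ones rootvnode and rootncp keep.
 */
#if 0
	if (VFS_ROOT(mp, &vp) == 0)
		vfs_cache_setroot(vp, cache_allocroot(mp, vp));
#endif
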
1904/*
fad57d0e
MD
1905 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
1906 * topology and is being removed as quickly as possible. The new VOP_N*()
1907 * API calls are required to make specific adjustments using the supplied
1908 * ncp pointers rather than just bogusly purging random vnodes.
1909 *
7ea21ed1
MD
1910 * Invalidate all namecache entries to a particular vnode as well as
1911 * any direct children of that vnode in the namecache. This is a
1912 * 'catch all' purge used by filesystems that do not know any better.
1913 *
7ea21ed1
MD
1914 * Note that the linkage between the vnode and its namecache entries will
1915 * be removed, but the namecache entries themselves might stay put due to
1916 * active references from elsewhere in the system or due to the existence of
1917 * the children. The namecache topology is left intact even if we do not
1918 * know what the vnode association is. Such entries will be marked
1919 * NCF_UNRESOLVED.
984263bc 1920 */
984263bc 1921void
8987aad7 1922cache_purge(struct vnode *vp)
984263bc 1923{
6b008938 1924 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
984263bc
MD
1925}
1926
1927/*
1928 * Flush all entries referencing a particular filesystem.
1929 *
1930 * Since we need to check it anyway, we will flush all the invalid
1931 * entries at the same time.
1932 */
1933void
8987aad7 1934cache_purgevfs(struct mount *mp)
984263bc 1935{
bc0c094e 1936 struct nchashhead *nchpp;
984263bc
MD
1937 struct namecache *ncp, *nnp;
1938
7ea21ed1
MD
1939 /*
1940 * Scan hash tables for applicable entries.
1941 */
bc0c094e
MD
1942 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
1943 ncp = LIST_FIRST(nchpp);
7ea21ed1
MD
1944 if (ncp)
1945 cache_hold(ncp);
1946 while (ncp) {
984263bc 1947 nnp = LIST_NEXT(ncp, nc_hash);
7ea21ed1
MD
1948 if (nnp)
1949 cache_hold(nnp);
4fcb1cf7 1950 if (ncp->nc_mount == mp) {
67773eb3 1951 cache_lock(ncp);
984263bc 1952 cache_zap(ncp);
67773eb3 1953 } else {
7ea21ed1 1954 cache_drop(ncp);
67773eb3 1955 }
7ea21ed1 1956 ncp = nnp;
984263bc
MD
1957 }
1958 }
1959}
1960
6b008938
MD
1961/*
1962 * Create a new (theoretically) unique fsmid
1963 */
1964int64_t
1965cache_getnewfsmid(void)
1966{
1967 static int fsmid_roller;
1968 int64_t fsmid;
1969
1970 ++fsmid_roller;
1971 fsmid = ((int64_t)time_second << 32) |
1972 (fsmid_roller & 0x7FFFFFFF);
1973 return (fsmid);
1974}
1975
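/*
 * Worked layout example (illustrative): with time_second = 0x43F00000
 * and the fifth call since boot, the code above yields
 *
 *	fsmid = (0x43F00000LL << 32) | 5 = 0x43F0000000000005
 *
 * i.e. seconds-since-epoch in the high 32 bits over a wrapping 31-bit
 * serial, so collisions are possible only across same-second reboots.
 */
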
1976
984263bc
MD
1977static int disablecwd;
1978SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");
1979
1980static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
1981static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
1982static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
1983static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
1984static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
1985static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
41c20dac 1986
984263bc 1987int
753fd850 1988sys___getcwd(struct __getcwd_args *uap)
63f58b90 1989{
02680f1b 1990 int buflen;
63f58b90 1991 int error;
02680f1b
MD
1992 char *buf;
1993 char *bp;
1994
1995 if (disablecwd)
1996 return (ENODEV);
1997
1998 buflen = uap->buflen;
1999 if (buflen < 2)
2000 return (EINVAL);
2001 if (buflen > MAXPATHLEN)
2002 buflen = MAXPATHLEN;
63f58b90 2003
02680f1b
MD
2004 buf = malloc(buflen, M_TEMP, M_WAITOK);
2005 bp = kern_getcwd(buf, buflen, &error);
63f58b90 2006 if (error == 0)
02680f1b
MD
2007 error = copyout(bp, uap->buf, strlen(bp) + 1);
2008 free(buf, M_TEMP);
63f58b90
EN
2009 return (error);
2010}
2011
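/*
 * Userland view (a hedged sketch: __getcwd() is the raw syscall that
 * getcwd(3) wraps; libc's exact fallback behavior is assumed, not
 * taken from this file).  The kernel copies out strlen(bp) + 1 bytes,
 * so the result is always NUL-terminated.
 */
#if 0
	char buf[MAXPATHLEN];

	if (__getcwd(buf, sizeof(buf)) == 0)
		printf("cwd = %s\n", buf);
#endif
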
02680f1b
MD
2012char *
2013kern_getcwd(char *buf, size_t buflen, int *error)
984263bc 2014{
41c20dac 2015 struct proc *p = curproc;
63f58b90 2016 char *bp;
02680f1b 2017 int i, slash_prefixed;
984263bc
MD
2018 struct filedesc *fdp;
2019 struct namecache *ncp;
984263bc
MD
2020
2021 numcwdcalls++;
63f58b90
EN
2022 bp = buf;
2023 bp += buflen - 1;
984263bc
MD
2024 *bp = '\0';
2025 fdp = p->p_fd;
2026 slash_prefixed = 0;
524c845c
MD
2027
2028 ncp = fdp->fd_ncdir;
2029 while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
2030 if (ncp->nc_flag & NCF_MOUNTPT) {
2031 if (ncp->nc_mount == NULL) {
2032 *error = EBADF; /* forced unmount? */
02680f1b 2033 return(NULL);
984263bc 2034 }
524c845c 2035 ncp = ncp->nc_parent;
984263bc
MD
2036 continue;
2037 }
984263bc
MD
2038 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
2039 if (bp == buf) {
2040 numcwdfail4++;
02680f1b
MD
2041 *error = ENOMEM;
2042 return(NULL);
984263bc
MD
2043 }
2044 *--bp = ncp->nc_name[i];
2045 }
2046 if (bp == buf) {
2047 numcwdfail4++;
02680f1b
MD
2048 *error = ENOMEM;
2049 return(NULL);
984263bc
MD
2050 }
2051 *--bp = '/';
2052 slash_prefixed = 1;
524c845c
MD
2053 ncp = ncp->nc_parent;
2054 }
2055 if (ncp == NULL) {
2056 numcwdfail2++;
2057 *error = ENOENT;
2058 return(NULL);
984263bc
MD
2059 }
2060 if (!slash_prefixed) {
2061 if (bp == buf) {
2062 numcwdfail4++;
02680f1b
MD
2063 *error = ENOMEM;
2064 return(NULL);
984263bc
MD
2065 }
2066 *--bp = '/';
2067 }
2068 numcwdfound++;
02680f1b
MD
2069 *error = 0;
2070 return (bp);
984263bc
MD
2071}
2072
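/*
 * Worked example of the backwards construction above (illustrative):
 * for an fd_ncdir chain "local" -> "usr" -> root, bp starts at the
 * terminating NUL and the buffer fills right to left:
 *
 *	"...........\0"		initial state, *bp = '\0'
 *	"......local\0"		deepest component, copied in reverse
 *	"...../local\0"		slash prefix, slash_prefixed = 1
 *	"../usr/local\0"	next component plus its slash
 *
 * and the returned bp points at "/usr/local".
 */
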
2073/*
2074 * Thus begins the fullpath magic.
2075 */
2076
2077#undef STATNODE
2078#define STATNODE(name) \
2079 static u_int name; \
2080 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
2081
2082static int disablefullpath;
2083SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
2084 &disablefullpath, 0, "");
2085
2086STATNODE(numfullpathcalls);
2087STATNODE(numfullpathfail1);
2088STATNODE(numfullpathfail2);
2089STATNODE(numfullpathfail3);
2090STATNODE(numfullpathfail4);
2091STATNODE(numfullpathfound);
2092
2093int
b6372d22 2094cache_fullpath(struct proc *p, struct namecache *ncp, char **retbuf, char **freebuf)
8987aad7 2095{
984263bc
MD
2096 char *bp, *buf;
2097 int i, slash_prefixed;
75ffff0d 2098 struct namecache *fd_nrdir;
984263bc 2099
b6372d22 2100 numfullpathcalls--;
b310dfc4 2101
984263bc
MD
2102 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
2103 bp = buf + MAXPATHLEN - 1;
2104 *bp = '\0';
75ffff0d
JS
2105 if (p != NULL)
2106 fd_nrdir = p->p_fd->fd_nrdir;
2107 else
2108 fd_nrdir = NULL;
984263bc 2109 slash_prefixed = 0;
75ffff0d 2110 while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
524c845c
MD
2111 if (ncp->nc_flag & NCF_MOUNTPT) {
2112 if (ncp->nc_mount == NULL) {
984263bc 2113 free(buf, M_TEMP);
524c845c 2114 return(EBADF);
984263bc 2115 }
524c845c 2116 ncp = ncp->nc_parent;
984263bc
MD
2117 continue;
2118 }
984263bc
MD
2119 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
2120 if (bp == buf) {
2121 numfullpathfail4++;
2122 free(buf, M_TEMP);
b6372d22 2123 return(ENOMEM);
984263bc
MD
2124 }
2125 *--bp = ncp->nc_name[i];
2126 }
2127 if (bp == buf) {
2128 numfullpathfail4++;
2129 free(buf, M_TEMP);
b6372d22 2130 return(ENOMEM);
984263bc
MD
2131 }
2132 *--bp = '/';
2133 slash_prefixed = 1;
524c845c
MD
2134 ncp = ncp->nc_parent;
2135 }
2136 if (ncp == NULL) {
2137 numfullpathfail2++;
2138 free(buf, M_TEMP);
b6372d22 2139 return(ENOENT);
984263bc 2140 }
75ffff0d 2141 if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
872b00c0
JS
2142 bp = buf + MAXPATHLEN - 1;
2143 *bp = '\0';
2144 slash_prefixed = 0;
2145 }
984263bc
MD
2146 if (!slash_prefixed) {
2147 if (bp == buf) {
2148 numfullpathfail4++;
2149 free(buf, M_TEMP);
b6372d22 2150 return(ENOMEM);
984263bc
MD
2151 }
2152 *--bp = '/';
2153 }
2154 numfullpathfound++;
2155 *retbuf = bp;
b310dfc4 2156 *freebuf = buf;
6a506bad
JS
2157
2158 return(0);
984263bc 2159}
8987aad7 2160
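/*
 * Caller sketch (hedged): retbuf points into the single malloc'd
 * buffer returned via freebuf, so the caller frees only freebuf and
 * must not touch retbuf afterwards.
 */
#if 0
	error = cache_fullpath(p, ncp, &retbuf, &freebuf);
	if (error == 0) {
		printf("path: %s\n", retbuf);
		free(freebuf, M_TEMP);	/* retbuf becomes invalid here */
	}
#endif
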
b6372d22
JS
2161int
2162vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
2163{
b6372d22
JS
2164 struct namecache *ncp;
2165
2166 numfullpathcalls++;
2167 if (disablefullpath)
2168 return (ENODEV);
2169
2170 if (p == NULL)
2171 return (EINVAL);
2172
2173 /* if vn is NULL the client wants us to use p->p_textvp */
2174 if (vn == NULL) {
2175 if ((vn = p->p_textvp) == NULL)
2176 return (EINVAL);
2177 }
2178 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
2179 if (ncp->nc_nlen)
2180 break;
2181 }
2182 if (ncp == NULL)
2183 return (EINVAL);
2184
2185 numfullpathcalls--;
2186 return(cache_fullpath(p, ncp, retbuf, freebuf));
2187}
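/*
 * Usage sketch (hedged): passing vn == NULL resolves the path of the
 * process's text vnode via the fallback above.
 */
#if 0
	if (vn_fullpath(p, NULL, &retbuf, &freebuf) == 0) {
		printf("executable: %s\n", retbuf);
		free(freebuf, M_TEMP);
	}
#endif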