/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.72 2006/06/05 07:26:10 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;		/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
  0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
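
/*
 * Example (sketch, hypothetical userland locals, not part of the kernel
 * build): a consumer would typically read and aggregate the per-cpu
 * array via sysctlbyname(3):
 *
 *	struct nchstats stats[SMP_MAXCPU];
 *	size_t len = sizeof(stats);
 *
 *	if (sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0) == 0)
 *		ncpus = len / sizeof(struct nchstats);
 */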

static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		cache_lock(ncp);
		cache_zap(ncp);
	} else {
		atomic_subtract_int(&ncp->nc_refs, 1);
	}
}

/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 *
 * If we are creating a child under an oldapi parent we must mark the
 * child as being an oldapi entry as well.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		cache_drop(par);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	/*
	 * Construct a fake FSMID based on the time of day and a 32 bit
	 * roller for uniqueness.  This is used to generate a useful
	 * FSMID for filesystems which do not support it.
	 */
	ncp->nc_fsmid = cache_getnewfsmid();
	TAILQ_INIT(&ncp->nc_list);
	cache_lock(ncp);
	return(ncp);
}

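/*
 * Free a namecache structure previously obtained from cache_alloc().
 * The caller must hold the sole remaining reference and the exclusive
 * lock.
 */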
static void
cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		free(ncp->nc_name, M_VFSCACHE);
	free(ncp, M_VFSCACHE);
}

/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
	return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
	_cache_drop(ncp);
}

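/*
 * Example (sketch): cache_hold()/cache_drop() bracket an operation which
 * may block, keeping the entry from being destroyed without locking it:
 *
 *	ncp = cache_hold(ncp);
 *	...possibly blocking operation...
 *	cache_drop(ncp);
 */
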
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
void
cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * WARNING!  If VRECLAIMED is set the vnode could
			 * already be in the middle of a recycle.  Callers
			 * should not assume that nc_vp is usable when
			 * not NULL.  cache_vref() or cache_vget() must be
			 * called.
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			printf("[diagnostic] cache_lock: blocked on %p", ncp);
			if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
				printf(" [MOUNTFROM %s]\n", ncp->nc_mount->mnt_stat.f_mntfromname);
			else
				printf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
		}
	}

	if (didwarn == 1) {
		printf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}

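/*
 * Non-blocking version of cache_lock().  Acquires the exclusive lock only
 * if nobody, including the caller, currently holds it, returning 0 on
 * success or EWOULDBLOCK if the lock cannot be obtained immediately.
 */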
int
cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * WARNING!  If VRECLAIMED is set the vnode could
		 * already be in the middle of a recycle.  Callers
		 * should not assume that nc_vp is usable when
		 * not NULL.  cache_vref() or cache_vget() must be
		 * called.
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}

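/*
 * Release one recursion level of the exclusive lock.  When the last
 * level is released the vnode hold is dropped and any waiters blocked
 * in cache_lock() are woken up.
 */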
void
cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		cache_setunresolved(ncp);
	return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		cache_lock(ncp);
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
	cache_unlock(ncp);
	_cache_drop(ncp);
}

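/*
 * Example (sketch, hypothetical locals): the canonical get/resolve/put
 * cycle used by consumers such as nlookup:
 *
 *	ncp = cache_get(ncp);		(ref + lock, revalidates nc_vp)
 *	if (ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(ncp, cred);
 *	cache_put(ncp);			(unlock + drop)
 */
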
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

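/*
 * Arm a timeout (in ticks from now) on a resolved namecache entry.
 * cache_nlookup() zaps entries whose timeout has expired.  A computed
 * value of 0 is bumped to 1 so it remains distinguishable from
 * 'no timeout'.
 */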
void
cache_settimeout(struct namecache *ncp, int nticks)
{
	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
void
cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				  NCF_FSMID);
	}
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state; the flags select the
 * additional behavior:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 */
int
cache_inval(struct namecache *ncp, int flags)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		cache_hold(kid);
		cache_unlock(ncp);
		while (kid) {
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				cache_lock(kid);
				rcnt += cache_inval(kid, flags & ~CINV_DESTROY);
				cache_unlock(kid);
			}
			cache_drop(kid);
			kid = nextkid;
		}
		cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			cache_hold(next);
		cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			cache_put(ncp);
			if (next)
				cache_drop(next);
			goto restart;
		}
		cache_inval(ncp, flags);
		cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			printf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target are assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
	struct namecache *scan;
	int didwarn = 0;

	cache_setunresolved(fncp);
	cache_setunresolved(tncp);
	while (cache_inval(tncp, CINV_CHILDREN) != 0) {
		if (didwarn++ % 10 == 0) {
			printf("Warning: cache_rename: race during "
				"rename %s->%s\n",
				fncp->nc_name, tncp->nc_name);
		}
		tsleep(tncp, 0, "mvrace", hz / 10);
		cache_setunresolved(tncp);
	}
	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
		cache_hold(scan);
		cache_unlink_parent(scan);
		cache_link_parent(scan, tncp);
		if (scan->nc_flag & NCF_HASHED)
			cache_rehash(scan);
		cache_drop(scan);
	}
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			printf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
			cache_lock(ncp);
			cache_setunresolved(ncp);
			cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

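/*
 * Similar to cache_vget() but only acquires a reference (vref()) on the
 * vnode rather than locking it, unresolving and retrying if a reclaim
 * race is detected.
 */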
int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if (vp->v_flag & VRECLAIMED) {
			printf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
			cache_lock(ncp);
			cache_setunresolved(ncp);
			cache_unlock(ncp);
			goto again;
		}
		vref(vp);
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct namecache *ncp)
{
	struct vnode *vp;
	struct namecache *scan;

	/*
	 * Warning: even if we get a non-NULL vp it could still be in the
	 * middle of a recyclement.  Don't do anything fancy, just set
	 * NCF_FSMID.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
			for (scan = ncp; scan; scan = scan->nc_parent) {
				if (scan->nc_flag & NCF_FSMID)
					break;
				scan->nc_flag |= NCF_FSMID;
			}
		}
	} else {
		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
			ncp->nc_flag |= NCF_FSMID;
			ncp = ncp->nc_parent;
		}
	}
}

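/*
 * Vnode-based variant of cache_update_fsmid(): flag every namecache
 * entry associated with the vnode, and its parents, for an FSMID update.
 */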
void
cache_update_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *scan;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		for (scan = ncp; scan; scan = scan->nc_parent) {
			if (scan->nc_flag & NCF_FSMID)
				break;
			scan->nc_flag |= NCF_FSMID;
		}
	}
}

/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
	struct namecache *ncp;
	int changed = 0;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			changed = 1;
		}
	}
	if (*fsmid == 0)
		++*fsmid;
	if (changed)
		++*fsmid;
	return(changed);
}

/*
 * Obtain the FSMID for a vnode for filesystems which do not support
 * a built-in FSMID.
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;

	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			++ncp->nc_fsmid;
		}
		return(ncp->nc_fsmid);
	}
	return(VNOVAL);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
				  struct vnode *dvp);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
	struct namecache *ncp;
	struct vnode *saved_dvp;
	struct vnode *pvp;
	int error;

	ncp = NULL;
	saved_dvp = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		ncp = TAILQ_FIRST(&dvp->v_namecache);
		printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			ncp = cache_get(dvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_put(ncp);
			if (ncvp_debug) {
				printf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					printf(" failed\n");
				ncp = NULL;
				break;
			}
			if (ncvp_debug)
				printf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				printf("lookupdotdot(longpath) failed %d "
				       "dvp %p\n", error, dvp);
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			printf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		VOP_UNLOCK(pvp, 0);

		/*
		 * Reuse makeit as a recursion depth counter.
		 */
		ncp = cache_fromdvp(pvp, cred, makeit + 1);
		vrele(pvp);
		if (ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(ncp, cred, dvp);
		cache_drop(ncp);
		if (error) {
			printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, ncp->nc_name, dvp);
			ncp = NULL;
			break;
		}
		if (ncvp_debug) {
			printf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, ncp->nc_name);
		}
	}
	if (ncp)
		cache_hold(ncp);
	if (saved_dvp)
		vrele(saved_dvp);
	return (ncp);
}

/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct namecache *ncp;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	for (;;) {
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			vrele(dvp);
			return (error);
		}
		VOP_UNLOCK(pvp, 0);
		if ((ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			cache_hold(ncp);
			vrele(pvp);
			break;
		}
		if (pvp->v_flag & VROOT) {
			ncp = cache_get(pvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_unlock(ncp);
			vrele(pvp);
			if (error) {
				cache_drop(ncp);
				vrele(dvp);
				return (error);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (last_fromdvp_report != time_second) {
		last_fromdvp_report = time_second;
		printf("Warning: extremely inefficient path resolution on %s\n",
			ncp->nc_name);
	}
	error = cache_inefficient_scan(ncp, cred, dvp);

	/*
	 * Hopefully dvp now has a namecache record associated with it.
	 * Leave it referenced to prevent the kernel from recycling the
	 * vnode.  Otherwise extremely long directory paths could result
	 * in endless recycling.
	 */
	if (*saved_dvp)
		vrele(*saved_dvp);
	*saved_dvp = dvp;
	return (error);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
		       struct vnode *dvp)
{
	struct nlcomponent nlc;
	struct namecache *rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vget(ncp, cred, LK_SHARED, &pvp)) != 0)
		return (error);
	if (ncvp_debug)
		printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = malloc(blksize, M_TEMP, M_WAITOK);
	rncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		printf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					printf("cache_inefficient_scan: "
					       "MATCHED inode %ld path %s/%*.*s\n",
					       vat.va_fileid, ncp->nc_name,
					       den->d_namlen, den->d_namlen,
					       den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				VOP_UNLOCK(pvp, 0);
				rncp = cache_nlookup(ncp, &nlc);
				KKASSERT(rncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	if (rncp) {
		vrele(pvp);
		if (rncp->nc_flag & NCF_UNRESOLVED) {
			cache_setvp(rncp, dvp);
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s = %p\n",
					ncp->nc_name, rncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					ncp->nc_name, rncp->nc_name, dvp,
					rncp->nc_vp);
			}
		}
		if (rncp->nc_vp == NULL)
			error = rncp->nc_error;
		cache_put(rncp);
	} else {
		printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, ncp->nc_name);
		vput(pvp);
		error = ENOENT;
	}
	free(rbuf, M_TEMP);
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			free(ncp->nc_name, M_VFSCACHE);
		free(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		cache_lock(ncp);
	}
done:
	cache_unlock(ncp);
	atomic_subtract_int(&ncp->nc_refs, 1);
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}

/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
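
/*
 * Example (sketch, hypothetical locals): a typical consumer fills in an
 * nlcomponent and resolves top-down:
 *
 *	struct nlcomponent nlc;
 *
 *	nlc.nlc_nameptr = name;
 *	nlc.nlc_namelen = namelen;
 *	ncp = cache_nlookup(par, &nlc);
 *	if (ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(ncp, cred);
 *	cache_put(ncp);
 */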
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	u_int32_t hash;
	globaldata_t gd;

	numcalls++;
	gd = mycpu;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Zap entries that have timed out.
		 */
		if (ncp->nc_timeout &&
		    (int)(ncp->nc_timeout - ticks) < 0 &&
		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
		    ncp->nc_exlocks == 0
		) {
			cache_zap(cache_get(ncp));
			goto restart;
		}

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (cache_get_nonblock(ncp) == 0) {
				if (new_ncp)
					cache_free(new_ncp);
				goto found;
			}
			cache_get(ncp);
			cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * malloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  The caller is responsible for
	 * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
	 * be NULL.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
		ncp->nc_mount = par->nc_mount;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	return(ncp);
}

/*
 * Given a locked ncp, validate that the vnode, if present, is actually
 * usable.  If it is not usable set the ncp to an unresolved state.
 */
void
cache_validate(struct namecache *ncp)
{
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
	}
}

690a3127 1590/*
21739618 1591 * Resolve an unresolved namecache entry, generally by looking it up.
67773eb3 1592 * The passed ncp must be locked and refd.
21739618
MD
1593 *
1594 * Theoretically since a vnode cannot be recycled while held, and since
1595 * the nc_parent chain holds its vnode as long as children exist, the
1596 * direct parent of the cache entry we are trying to resolve should
1597 * have a valid vnode. If not then generate an error that we can
1598 * determine is related to a resolver bug.
fad57d0e 1599 *
9b1b3591
MD
1600 * However, if a vnode was in the middle of a recyclement when the NCP
1601 * got locked, ncp->nc_vp might point to a vnode that is about to become
1602 * invalid. cache_resolve() handles this case by unresolving the entry
1603 * and then re-resolving it.
1604 *
fad57d0e
MD
1605 * Note that successful resolution does not necessarily return an error
1606 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
1607 * will be returned.
690a3127
MD
1608 */
1609int
21739618 1610cache_resolve(struct namecache *ncp, struct ucred *cred)
690a3127 1611{
21739618 1612 struct namecache *par;
67773eb3 1613 int error;
8e005a45 1614
67773eb3 1615restart:
8e005a45 1616 /*
9b1b3591
MD
1617 * If the ncp is already resolved we have nothing to do. However,
1618 * we do want to guarentee that a usable vnode is returned when
1619 * a vnode is present, so make sure it hasn't been reclaimed.
8e005a45 1620 */
9b1b3591
MD
1621 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1622 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1623 cache_setunresolved(ncp);
1624 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1625 return (ncp->nc_error);
1626 }
21739618 1627
646a1cda
MD
1628 /*
1629 * Mount points need special handling because the parent does not
1630 * belong to the same filesystem as the ncp.
1631 */
8e005a45 1632 if (ncp->nc_flag & NCF_MOUNTPT)
646a1cda 1633 return (cache_resolve_mp(ncp));
646a1cda
MD
1634
1635 /*
1636 * We expect an unbroken chain of ncps to at least the mount point,
1637 * and even all the way to root (but this code doesn't have to go
1638 * past the mount point).
1639 */
1640 if (ncp->nc_parent == NULL) {
8e005a45 1641 printf("EXDEV case 1 %p %*.*s\n", ncp,
646a1cda 1642 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
21739618 1643 ncp->nc_error = EXDEV;
646a1cda
MD
1644 return(ncp->nc_error);
1645 }
1646
1647 /*
1648 * The vp's of the parent directories in the chain are held via vhold()
1649 * due to the existence of the child, and should not disappear.
1650 * However, there are cases where they can disappear:
1651 *
1652 * - due to filesystem I/O errors.
1653 * - due to NFS being stupid about tracking the namespace,
1654 * destroying the namespace for entire directories quite often.
1655 * - due to forced unmounts.
e09206ba 1656 * - due to an rmdir (parent will be marked DESTROYED)
646a1cda
MD
1657 *
1658 * When this occurs we have to track the chain backwards and resolve
1659 * it, looping until the resolver catches up to the current node. We
1660 * could recurse here but we might run ourselves out of kernel stack
1661 * so we do it in a more painful manner. This situation really should
1662 * not occur all that often; when it does, it should not have to go
1663 * back very many nodes to resolve the ncp.
1664 */
1665 while (ncp->nc_parent->nc_vp == NULL) {
e09206ba
MD
1666 /*
1667 * This case can occur if a process is CD'd into a
1668 * directory which is then rmdir'd. If the parent is marked
1669 * destroyed, there is no point in trying to resolve it.
1670 */
1671 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
1672 return(ENOENT);
1673
646a1cda
MD
1674 par = ncp->nc_parent;
1675 while (par->nc_parent && par->nc_parent->nc_vp == NULL)
1676 par = par->nc_parent;
1677 if (par->nc_parent == NULL) {
1678 printf("EXDEV case 2 %*.*s\n",
1679 par->nc_nlen, par->nc_nlen, par->nc_name);
1680 return (EXDEV);
1681 }
1682 printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
1683 par->nc_nlen, par->nc_nlen, par->nc_name);
1684 /*
67773eb3
MD
1685 * The parent is not set in stone, ref and lock it to prevent
1686 * it from disappearing. Also note that due to renames it
1687 * is possible for our ncp to move and for par to no longer
1688 * be one of its parents. We resolve it anyway, the loop
1689 * will handle any moves.
646a1cda
MD
1690 */
1691 cache_get(par);
1692 if (par->nc_flag & NCF_MOUNTPT) {
1693 cache_resolve_mp(par);
8e005a45
MD
1694 } else if (par->nc_parent->nc_vp == NULL) {
1695 printf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
1696 cache_put(par);
1697 continue;
fad57d0e
MD
1698 } else if (par->nc_flag & NCF_UNRESOLVED) {
1699 par->nc_error = VOP_NRESOLVE(par, cred);
646a1cda 1700 }
67773eb3
MD
1701 if ((error = par->nc_error) != 0) {
1702 if (par->nc_error != EAGAIN) {
1703 printf("EXDEV case 3 %*.*s error %d\n",
1704 par->nc_nlen, par->nc_nlen, par->nc_name,
1705 par->nc_error);
1706 cache_put(par);
1707 return(error);
1708 }
1709 printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
1710 par, par->nc_nlen, par->nc_nlen, par->nc_name);
646a1cda 1711 }
67773eb3
MD
1712 cache_put(par);
1713 /* loop */
646a1cda 1714 }
8e005a45
MD
1715
1716 /*
fad57d0e 1717 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
8e005a45
MD
1718 * ncp's and reattach them. If this occurs the original ncp is marked
1719 * EAGAIN to force a relookup.
fad57d0e
MD
1720 *
1721 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
1722 * ncp must already be resolved.
8e005a45
MD
1723 */
1724 KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
fad57d0e 1725 ncp->nc_error = VOP_NRESOLVE(ncp, cred);
6ddb7618 1726 /*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
67773eb3
MD
1727 if (ncp->nc_error == EAGAIN) {
1728 printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
1729 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
1730 goto restart;
1731 }
646a1cda
MD
1732 return(ncp->nc_error);
1733}
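
/*
 * Editor's sketch: cache_resolve() requires the entry locked and
 * referenced, which cache_get()/cache_put() bracket nicely.  Note the
 * double meaning of the return value described above: both 0 and
 * ENOENT are successful resolutions.
 */
#if 0
	cache_get(ncp);				/* ref + lock */
	error = cache_resolve(ncp, cred);
	if (error != 0 && error != ENOENT) {
		/* hard failure, e.g. EXDEV on a broken parent chain */
	}
	cache_put(ncp);				/* unlock + drop */
#endif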
1734
1735/*
1736 * Resolve the ncp associated with a mount point. Such ncp's almost always
1737 * remain resolved and this routine is rarely called. NFS MPs tend to force
1738 * re-resolution more often due to NFS's mack-truck-smash-the-namecache
1739 * method of tracking namespace changes.
1740 *
6215aa92
MD
1741 * The semantics for this call are that the passed ncp must be locked on
1742 * entry and will be locked on return. However, if we actually have to
1743 * resolve the mount point we temporarily unlock the entry in order to
1744 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
1745 * the unlock we have to recheck the flags after we relock.
646a1cda
MD
1746 */
1747static int
1748cache_resolve_mp(struct namecache *ncp)
1749{
1750 struct vnode *vp;
1751 struct mount *mp = ncp->nc_mount;
6215aa92 1752 int error;
646a1cda
MD
1753
1754 KKASSERT(mp != NULL);
9b1b3591
MD
1755
1756 /*
1757 * If the ncp is already resolved, we have nothing to do. However,
1758 * we do want to guarantee that a usable vnode is returned when
1759 * a vnode is present, so make sure it hasn't been reclaimed.
1760 */
1761 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1762 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1763 cache_setunresolved(ncp);
1764 }
1765
646a1cda 1766 if (ncp->nc_flag & NCF_UNRESOLVED) {
6215aa92 1767 cache_unlock(ncp);
f9642f56 1768 while (vfs_busy(mp, 0))
646a1cda 1769 ;
6215aa92
MD
1770 error = VFS_ROOT(mp, &vp);
1771 cache_lock(ncp);
1772
1773 /*
1774 * recheck the ncp state after relocking.
1775 */
1776 if (ncp->nc_flag & NCF_UNRESOLVED) {
1777 ncp->nc_error = error;
1778 if (error == 0) {
1779 cache_setvp(ncp, vp);
1780 vput(vp);
1781 } else {
1782 printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
1783 cache_setvp(ncp, NULL);
1784 }
1785 } else if (error == 0) {
646a1cda 1786 vput(vp);
646a1cda 1787 }
f9642f56 1788 vfs_unbusy(mp);
21739618
MD
1789 }
1790 return(ncp->nc_error);
14c92d03
MD
1791}
1792
62d0f1f0
MD
1793void
1794cache_cleanneg(int count)
1795{
1796 struct namecache *ncp;
7ea21ed1
MD
1797
1798 /*
62d0f1f0
MD
1799 * Automode from the vnlru proc - clean out 10% of the negative cache
1800 * entries.
7ea21ed1 1801 */
62d0f1f0
MD
1802 if (count == 0)
1803 count = numneg / 10 + 1;
1804
1805 /*
1806 * Attempt to clean out the specified number of negative cache
1807 * entries.
1808 */
1809 while (count) {
7ea21ed1 1810 ncp = TAILQ_FIRST(&ncneglist);
eb82ae62
MD
1811 if (ncp == NULL) {
1812 KKASSERT(numneg == 0);
1813 break;
1814 }
62d0f1f0
MD
1815 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
1816 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
67773eb3
MD
1817 if (cache_get_nonblock(ncp) == 0)
1818 cache_zap(ncp);
62d0f1f0 1819 --count;
984263bc
MD
1820 }
1821}
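
/*
 * Editor's sketch: the two calling conventions described above.  A
 * count of 0 selects vnlru automode (numneg / 10 + 1 entries); any
 * other value trims up to that many negative entries.
 */
#if 0
	cache_cleanneg(0);		/* automode: ~10% of numneg */
	cache_cleanneg(32);		/* trim up to 32 entries */
#endif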
1822
fad57d0e
MD
1823/*
1824 * Rehash a ncp. Rehashing is typically required if the name changes (should
1825 * not generally occur) or the parent link changes. This function will
1826 * unhash the ncp if the ncp is no longer hashable.
1827 */
8c361dda
MD
1828static void
1829cache_rehash(struct namecache *ncp)
1830{
1831 struct nchashhead *nchpp;
1832 u_int32_t hash;
1833
1834 if (ncp->nc_flag & NCF_HASHED) {
1835 ncp->nc_flag &= ~NCF_HASHED;
1836 LIST_REMOVE(ncp, nc_hash);
1837 }
fad57d0e
MD
1838 if (ncp->nc_nlen && ncp->nc_parent) {
1839 hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
1840 hash = fnv_32_buf(&ncp->nc_parent,
1841 sizeof(ncp->nc_parent), hash);
1842 nchpp = NCHHASH(hash);
1843 LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
1844 ncp->nc_flag |= NCF_HASHED;
1845 }
8c361dda
MD
1846}
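
/*
 * Editor's sketch: the bucket hash chains FNV-1 over the name and then
 * over the parent pointer, so identical names under different parents
 * hash to different chains.  Variable names here are placeholders; a
 * lookup must compute the hash exactly the same way.
 */
#if 0
	u_int32_t hash;
	struct nchashhead *nchpp;

	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);	/* mix parent */
	nchpp = NCHHASH(hash);				/* bucket head */
#endif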
1847
984263bc 1848/*
24e51f36 1849 * Name cache initialization, from vfsinit() when we are booting
984263bc
MD
1850 */
1851void
8987aad7 1852nchinit(void)
984263bc 1853{
24e51f36
HP
1854 int i;
1855 globaldata_t gd;
1856
1857 /* Initialize per-cpu namecache effectiveness statistics. */
1858 for (i = 0; i < ncpus; ++i) {
1859 gd = globaldata_find(i);
1860 gd->gd_nchstats = &nchstats[i];
1861 }
7ea21ed1 1862 TAILQ_INIT(&ncneglist);
984263bc 1863 nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
fc21741a 1864 nclockwarn = 1 * hz;
21739618
MD
1865}
1866
1867/*
1868 * Called from start_init() to bootstrap the root filesystem. Returns
1869 * a referenced, unlocked namecache record.
1870 */
1871struct namecache *
5fd012e0 1872cache_allocroot(struct mount *mp, struct vnode *vp)
21739618 1873{
524c845c 1874 struct namecache *ncp = cache_alloc(0);
21739618 1875
21739618 1876 ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
5fd012e0 1877 ncp->nc_mount = mp;
8c361dda
MD
1878 cache_setvp(ncp, vp);
1879 return(ncp);
984263bc
MD
1880}
1881
1882/*
7ea21ed1 1883 * vfs_cache_setroot()
984263bc 1884 *
7ea21ed1
MD
1885 * Create an association between the root of our namecache and
1886 * the root vnode. This routine may be called several times during
1887 * booting.
690a3127
MD
1888 *
1889 * If the caller intends to save the returned namecache pointer somewhere
1890 * it must cache_hold() it.
7ea21ed1 1891 */
21739618
MD
1892void
1893vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
7ea21ed1 1894{
21739618
MD
1895 struct vnode *ovp;
1896 struct namecache *oncp;
1897
1898 ovp = rootvnode;
1899 oncp = rootncp;
1900 rootvnode = nvp;
1901 rootncp = ncp;
1902
1903 if (ovp)
1904 vrele(ovp);
1905 if (oncp)
1906 cache_drop(oncp);
7ea21ed1
MD
1907}
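
/*
 * Editor's sketch: how cache_allocroot() and vfs_cache_setroot()
 * combine when the root filesystem is bootstrapped.  'mp' and 'rootvp'
 * are placeholders for the root mount and its root vnode.
 */
#if 0
	struct namecache *ncp;

	ncp = cache_allocroot(mp, rootvp);	/* NCF_MOUNTPT | NCF_ROOT */
	vfs_cache_setroot(rootvp, ncp);		/* install rootvnode/rootncp */
#endif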
1908
1909/*
fad57d0e
MD
1910 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
1911 * topology and is being removed as quickly as possible. The new VOP_N*()
1912 * API calls are required to make specific adjustments using the supplied
1913 * ncp pointers rather than just bogusly purging random vnodes.
1914 *
7ea21ed1
MD
1915 * Invalidate all namecache entries to a particular vnode as well as
1916 * any direct children of that vnode in the namecache. This is a
1917 * 'catch all' purge used by filesystems that do not know any better.
1918 *
7ea21ed1
MD
1919 * Note that the linkage between the vnode and its namecache entries will
1920 * be removed, but the namecache entries themselves might stay put due to
1921 * active references from elsewhere in the system or due to the existence of
1922 * the children. The namecache topology is left intact even if we do not
1923 * know what the vnode association is. Such entries will be marked
1924 * NCF_UNRESOLVED.
984263bc 1925 */
984263bc 1926void
8987aad7 1927cache_purge(struct vnode *vp)
984263bc 1928{
6b008938 1929 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
984263bc
MD
1930}
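
/*
 * Editor's sketch: per the body above, cache_purge() is exactly the
 * "catch all" flag combination; filesystems with better knowledge can
 * call cache_inval_vp() with narrower flags themselves.
 */
#if 0
	cache_purge(vp);				/* equivalent to: */
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
#endif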
1931
1932/*
1933 * Flush all entries referencing a particular filesystem.
1934 *
1935 * Since we need to check it anyway, we will flush all the invalid
1936 * entries at the same time.
1937 */
1938void
8987aad7 1939cache_purgevfs(struct mount *mp)
984263bc 1940{
bc0c094e 1941 struct nchashhead *nchpp;
984263bc
MD
1942 struct namecache *ncp, *nnp;
1943
7ea21ed1
MD
1944 /*
1945 * Scan hash tables for applicable entries.
1946 */
bc0c094e
MD
1947 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
1948 ncp = LIST_FIRST(nchpp);
7ea21ed1
MD
1949 if (ncp)
1950 cache_hold(ncp);
1951 while (ncp) {
984263bc 1952 nnp = LIST_NEXT(ncp, nc_hash);
7ea21ed1
MD
1953 if (nnp)
1954 cache_hold(nnp);
4fcb1cf7 1955 if (ncp->nc_mount == mp) {
67773eb3 1956 cache_lock(ncp);
984263bc 1957 cache_zap(ncp);
67773eb3 1958 } else {
7ea21ed1 1959 cache_drop(ncp);
67773eb3 1960 }
7ea21ed1 1961 ncp = nnp;
984263bc
MD
1962 }
1963 }
1964}
1965
6b008938
MD
1966/*
1967 * Create a new (theoretically) unique fsmid
1968 */
1969int64_t
1970cache_getnewfsmid(void)
1971{
1972 static int fsmid_roller;
1973 int64_t fsmid;
1974
1975 ++fsmid_roller;
1976 fsmid = ((int64_t)time_second << 32) |
1977 (fsmid_roller & 0x7FFFFFFF);
1978 return (fsmid);
1979}
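
/*
 * Editor's note: a worked example of the fsmid layout above.  With
 * time_second == 0x43B2A9F0 and fsmid_roller == 5 the result is
 * 0x43B2A9F000000005: wall clock in the high 32 bits, a wrapping
 * 31-bit roller in the low bits.  Uniqueness is only "theoretical"
 * because a roller wrap within one second can repeat an id.
 */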
1980
1981
984263bc
MD
1982static int disablecwd;
1983SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");
1984
1985static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
1986static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
1987static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
1988static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
1989static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
1990static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
41c20dac 1991
984263bc 1992int
753fd850 1993sys___getcwd(struct __getcwd_args *uap)
63f58b90 1994{
02680f1b 1995 int buflen;
63f58b90 1996 int error;
02680f1b
MD
1997 char *buf;
1998 char *bp;
1999
2000 if (disablecwd)
2001 return (ENODEV);
2002
2003 buflen = uap->buflen;
2004 if (buflen < 2)
2005 return (EINVAL);
2006 if (buflen > MAXPATHLEN)
2007 buflen = MAXPATHLEN;
63f58b90 2008
02680f1b
MD
2009 buf = malloc(buflen, M_TEMP, M_WAITOK);
2010 bp = kern_getcwd(buf, buflen, &error);
63f58b90 2011 if (error == 0)
02680f1b
MD
2012 error = copyout(bp, uap->buf, strlen(bp) + 1);
2013 free(buf, M_TEMP);
63f58b90
EN
2014 return (error);
2015}
2016
02680f1b
MD
2017char *
2018kern_getcwd(char *buf, size_t buflen, int *error)
984263bc 2019{
41c20dac 2020 struct proc *p = curproc;
63f58b90 2021 char *bp;
02680f1b 2022 int i, slash_prefixed;
984263bc
MD
2023 struct filedesc *fdp;
2024 struct namecache *ncp;
984263bc
MD
2025
2026 numcwdcalls++;
63f58b90
EN
2027 bp = buf;
2028 bp += buflen - 1;
984263bc
MD
2029 *bp = '\0';
2030 fdp = p->p_fd;
2031 slash_prefixed = 0;
524c845c
MD
2032
2033 ncp = fdp->fd_ncdir;
2034 while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
2035 if (ncp->nc_flag & NCF_MOUNTPT) {
2036 if (ncp->nc_mount == NULL) {
2037 *error = EBADF; /* forced unmount? */
02680f1b 2038 return(NULL);
984263bc 2039 }
524c845c 2040 ncp = ncp->nc_parent;
984263bc
MD
2041 continue;
2042 }
984263bc
MD
2043 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
2044 if (bp == buf) {
2045 numcwdfail4++;
02680f1b
MD
2046 *error = ENOMEM;
2047 return(NULL);
984263bc
MD
2048 }
2049 *--bp = ncp->nc_name[i];
2050 }
2051 if (bp == buf) {
2052 numcwdfail4++;
02680f1b
MD
2053 *error = ENOMEM;
2054 return(NULL);
984263bc
MD
2055 }
2056 *--bp = '/';
2057 slash_prefixed = 1;
524c845c
MD
2058 ncp = ncp->nc_parent;
2059 }
2060 if (ncp == NULL) {
2061 numcwdfail2++;
2062 *error = ENOENT;
2063 return(NULL);
984263bc
MD
2064 }
2065 if (!slash_prefixed) {
2066 if (bp == buf) {
2067 numcwdfail4++;
02680f1b
MD
2068 *error = ENOMEM;
2069 return(NULL);
984263bc
MD
2070 }
2071 *--bp = '/';
2072 }
2073 numcwdfound++;
02680f1b
MD
2074 *error = 0;
2075 return (bp);
984263bc
MD
2076}
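
/*
 * Editor's note: the loop above assembles the path right-to-left from
 * the end of the buffer.  For a cwd of /usr/local and a 16-byte buffer
 * it fills in four steps (dots mark unused bytes):
 *
 *	[...........local]	copy "local", last character first
 *	[........../local]	prepend '/'
 *	[.......usr/local]	copy the parent's name
 *	[....../usr/local]	prepend '/'; parent is NCF_ROOT, loop ends
 *
 * bp is left pointing at the leading '/' and that is what the caller
 * copies out.
 */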
2077
2078/*
2079 * Thus begins the fullpath magic.
2080 */
2081
2082#undef STATNODE
2083#define STATNODE(name) \
2084 static u_int name; \
2085 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
2086
2087static int disablefullpath;
2088SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
2089 &disablefullpath, 0, "");
2090
2091STATNODE(numfullpathcalls);
2092STATNODE(numfullpathfail1);
2093STATNODE(numfullpathfail2);
2094STATNODE(numfullpathfail3);
2095STATNODE(numfullpathfail4);
2096STATNODE(numfullpathfound);
2097
2098int
b6372d22 2099cache_fullpath(struct proc *p, struct namecache *ncp, char **retbuf, char **freebuf)
8987aad7 2100{
984263bc
MD
2101 char *bp, *buf;
2102 int i, slash_prefixed;
75ffff0d 2103 struct namecache *fd_nrdir;
984263bc 2104
b6372d22 2105 numfullpathcalls--;
b310dfc4 2106
984263bc
MD
2107 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
2108 bp = buf + MAXPATHLEN - 1;
2109 *bp = '\0';
75ffff0d
JS
2110 if (p != NULL)
2111 fd_nrdir = p->p_fd->fd_nrdir;
2112 else
2113 fd_nrdir = NULL;
984263bc 2114 slash_prefixed = 0;
75ffff0d 2115 while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
524c845c
MD
2116 if (ncp->nc_flag & NCF_MOUNTPT) {
2117 if (ncp->nc_mount == NULL) {
984263bc 2118 free(buf, M_TEMP);
524c845c 2119 return(EBADF);
984263bc 2120 }
524c845c 2121 ncp = ncp->nc_parent;
984263bc
MD
2122 continue;
2123 }
984263bc
MD
2124 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
2125 if (bp == buf) {
2126 numfullpathfail4++;
2127 free(buf, M_TEMP);
b6372d22 2128 return(ENOMEM);
984263bc
MD
2129 }
2130 *--bp = ncp->nc_name[i];
2131 }
2132 if (bp == buf) {
2133 numfullpathfail4++;
2134 free(buf, M_TEMP);
b6372d22 2135 return(ENOMEM);
984263bc
MD
2136 }
2137 *--bp = '/';
2138 slash_prefixed = 1;
524c845c
MD
2139 ncp = ncp->nc_parent;
2140 }
2141 if (ncp == NULL) {
2142 numfullpathfail2++;
2143 free(buf, M_TEMP);
b6372d22 2144 return(ENOENT);
984263bc 2145 }
75ffff0d 2146 if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
872b00c0
JS
2147 bp = buf + MAXPATHLEN - 1;
2148 *bp = '\0';
2149 slash_prefixed = 0;
2150 }
984263bc
MD
2151 if (!slash_prefixed) {
2152 if (bp == buf) {
2153 numfullpathfail4++;
2154 free(buf, M_TEMP);
b6372d22 2155 return(ENOMEM);
984263bc
MD
2156 }
2157 *--bp = '/';
2158 }
2159 numfullpathfound++;
2160 *retbuf = bp;
b310dfc4 2161 *freebuf = buf;
6a506bad
JS
2162
2163 return(0);
984263bc 2164}
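
/*
 * Editor's sketch: *retbuf points into the middle of the MAXPATHLEN
 * allocation whose base is returned in *freebuf, so the caller must
 * free *freebuf, never *retbuf.  Variable names are placeholders.
 */
#if 0
	char *path, *fbuf;

	if (cache_fullpath(p, ncp, &path, &fbuf) == 0) {
		printf("path: %s\n", path);
		free(fbuf, M_TEMP);	/* free the base, not 'path' */
	}
#endif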
8987aad7 2165
b6372d22
JS
2166int
2167vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
2168{
b6372d22
JS
2169 struct namecache *ncp;
2170
2171 numfullpathcalls++;
2172 if (disablefullpath)
2173 return (ENODEV);
2174
2175 if (p == NULL)
2176 return (EINVAL);
2177
2178 /* vn is NULL, client wants us to use p->p_textvp */
2179 if (vn == NULL) {
2180 if ((vn = p->p_textvp) == NULL)
2181 return (EINVAL);
2182 }
2183 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
2184 if (ncp->nc_nlen)
2185 break;
2186 }
2187 if (ncp == NULL)
2188 return (EINVAL);
2189
2190 numfullpathcalls--;
2191 return(cache_fullpath(p, ncp, retbuf, freebuf));
2192}
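
/*
 * Editor's sketch: passing vn == NULL makes vn_fullpath() fall back to
 * p->p_textvp, i.e. it reconstructs the path of the process image.
 */
#if 0
	char *path, *fbuf;

	if (vn_fullpath(p, NULL, &path, &fbuf) == 0) {
		/* 'path' names p's executable; free the base buffer */
		free(fbuf, M_TEMP);
	}
#endif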