/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.65 2006/05/05 21:15:09 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)   (&nchashtbl[(hash) & nchash])
#define MINNEG          1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;     /* Hash Table */
static struct namecache_list    ncneglist;              /* instead of vnode */
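/*
 * Illustrative sketch (not part of the original source): cache_nlookup()
 * below builds the hash key by folding the component name and then the
 * parent pointer through fnv_32_buf(), and NCHHASH() masks the result
 * into a bucket:
 *
 *      hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
 *      hash = fnv_32_buf(&par, sizeof(par), hash);
 *      bucket = NCHHASH(hash);
 */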
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0    Only errors are reported
 * 1    Successes are reported
 * 2    Successes + the whole directory scan is reported
 * 3    Force the directory scan code to run as if the parent vnode did
 *      not have a namecache record, even if it does have one.
 */
static int      ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");
static u_long   nchash;                 /* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long   ncnegfactor = 16;       /* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int      nclockwarn;             /* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long   numneg;                 /* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long   numcache;               /* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long   numunres;               /* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
        SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
        struct globaldata *gd;
        int i, error;

        error = 0;
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
                        sizeof(struct nchstats))))
                        break;
        }

        return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
  0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
        ++ncp->nc_refs;
        return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
        KKASSERT(ncp->nc_refs > 0);
        if (ncp->nc_refs == 1 &&
            (ncp->nc_flag & NCF_UNRESOLVED) &&
            TAILQ_EMPTY(&ncp->nc_list)
        ) {
                KKASSERT(ncp->nc_exlocks == 0);
                cache_lock(ncp);
                cache_zap(ncp);
        } else {
                --ncp->nc_refs;
        }
}
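/*
 * Usage sketch (illustrative, not part of the original source): holds and
 * drops bracket any window where the entry must not be deleted:
 *
 *      ncp = cache_hold(ncp);          (ref prevents premature deletion)
 *      ... inspect or traverse ncp ...
 *      cache_drop(ncp);                (may zap an unresolved, childless,
 *                                       last-ref entry)
 */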
/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 *
 * If we are creating a child under an oldapi parent we must mark the
 * child as being an oldapi entry as well.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
        KKASSERT(ncp->nc_parent == NULL);
        ncp->nc_parent = par;
        if (TAILQ_EMPTY(&par->nc_list)) {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
                /*
                 * Any vp associated with an ncp which has children must
                 * be held to prevent it from being recycled.
                 */
                if (par->nc_vp)
                        vhold(par->nc_vp);
        } else {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
        }
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
        struct namecache *par;

        if ((par = ncp->nc_parent) != NULL) {
                ncp->nc_parent = NULL;
                par = cache_hold(par);
                TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                        vdrop(par->nc_vp);
                cache_drop(par);
        }
}
/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
        struct namecache *ncp;

        ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
        if (nlen)
                ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
        ncp->nc_nlen = nlen;
        ncp->nc_flag = NCF_UNRESOLVED;
        ncp->nc_error = ENOTCONN;       /* needs to be resolved */
        ncp->nc_refs = 1;

        /*
         * Construct a fake FSMID based on the time of day and a 32 bit
         * roller for uniqueness.  This is used to generate a useful
         * FSMID for filesystems which do not support it.
         */
        ncp->nc_fsmid = cache_getnewfsmid();
        TAILQ_INIT(&ncp->nc_list);
        cache_lock(ncp);
        return(ncp);
}

static void
cache_free(struct namecache *ncp)
{
        KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
        if (ncp->nc_name)
                free(ncp->nc_name, M_VFSCACHE);
        free(ncp, M_VFSCACHE);
}
/*
 * Ref and deref a namecache structure.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
        return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
        _cache_drop(ncp);
}
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
void
cache_lock(struct namecache *ncp)
{
        thread_t td;
        int didwarn;

        KKASSERT(ncp->nc_refs != 0);
        didwarn = 0;
        td = curthread;

        for (;;) {
                if (ncp->nc_exlocks == 0) {
                        ncp->nc_exlocks = 1;
                        ncp->nc_locktd = td;
                        /*
                         * The vp associated with a locked ncp must be held
                         * to prevent it from being recycled (which would
                         * cause the ncp to become unresolved).
                         *
                         * WARNING!  If VRECLAIMED is set the vnode could
                         * already be in the middle of a recycle.  Callers
                         * should not assume that nc_vp is usable when
                         * not NULL.  cache_vref() or cache_vget() must be
                         * called.
                         *
                         * XXX loop on race for later MPSAFE work.
                         */
                        if (ncp->nc_vp)
                                vhold(ncp->nc_vp);
                        break;
                }
                if (ncp->nc_locktd == td) {
                        ++ncp->nc_exlocks;
                        break;
                }
                ncp->nc_flag |= NCF_LOCKREQ;
                if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
                        if (didwarn)
                                continue;
                        didwarn = 1;
                        printf("[diagnostic] cache_lock: blocked on %p", ncp);
                        if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
                                printf(" [MOUNTFROM %s]\n", ncp->nc_mount->mnt_stat.f_mntfromname);
                        else
                                printf(" \"%*.*s\"\n",
                                    ncp->nc_nlen, ncp->nc_nlen,
                                    ncp->nc_name);
                }
        }

        if (didwarn == 1) {
                printf("[diagnostic] cache_lock: unblocked %*.*s\n",
                        ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
        }
}
int
cache_lock_nonblock(struct namecache *ncp)
{
        thread_t td;

        KKASSERT(ncp->nc_refs != 0);
        td = curthread;
        if (ncp->nc_exlocks == 0) {
                ncp->nc_exlocks = 1;
                ncp->nc_locktd = td;
                /*
                 * The vp associated with a locked ncp must be held
                 * to prevent it from being recycled (which would
                 * cause the ncp to become unresolved).
                 *
                 * WARNING!  If VRECLAIMED is set the vnode could
                 * already be in the middle of a recycle.  Callers
                 * should not assume that nc_vp is usable when
                 * not NULL.  cache_vref() or cache_vget() must be
                 * called.
                 *
                 * XXX loop on race for later MPSAFE work.
                 */
                if (ncp->nc_vp)
                        vhold(ncp->nc_vp);
                return(0);
        } else {
                return(EWOULDBLOCK);
        }
}
void
cache_unlock(struct namecache *ncp)
{
        thread_t td = curthread;

        KKASSERT(ncp->nc_refs > 0);
        KKASSERT(ncp->nc_exlocks > 0);
        KKASSERT(ncp->nc_locktd == td);
        if (--ncp->nc_exlocks == 0) {
                if (ncp->nc_vp)
                        vdrop(ncp->nc_vp);
                ncp->nc_locktd = NULL;
                if (ncp->nc_flag & NCF_LOCKREQ) {
                        ncp->nc_flag &= ~NCF_LOCKREQ;
                        wakeup(ncp);
                }
        }
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
        _cache_hold(ncp);
        cache_lock(ncp);
        if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                cache_setunresolved(ncp);
        return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
        /* XXX MP */
        if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
                _cache_hold(ncp);
                cache_lock(ncp);
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        cache_setunresolved(ncp);
                return(0);
        }
        return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
        cache_unlock(ncp);
        _cache_drop(ncp);
}
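/*
 * Usage sketch (illustrative, not part of the original source): the
 * get/put pair is the safe way to obtain a locked, referenced ncp whose
 * vnode, if any, has not already begun a recyclement:
 *
 *      ncp = cache_get(ncp);
 *      if (ncp->nc_flag & NCF_UNRESOLVED)
 *              error = cache_resolve(ncp, cred);
 *      ...
 *      cache_put(ncp);
 */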
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
        KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
        ncp->nc_vp = vp;
        if (vp != NULL) {
                /*
                 * Any vp associated with an ncp which has children must
                 * be held.  Any vp associated with a locked ncp must be held.
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        vhold(vp);
                TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
                if (ncp->nc_exlocks)
                        vhold(vp);

                /*
                 * Set auxiliary flags
                 */
                switch(vp->v_type) {
                case VDIR:
                        ncp->nc_flag |= NCF_ISDIR;
                        break;
                case VLNK:
                        ncp->nc_flag |= NCF_ISSYMLINK;
                        /* XXX cache the contents of the symlink */
                        break;
                default:
                        break;
                }
                ++numcache;
                ncp->nc_error = 0;
        } else {
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                ++numneg;
                ncp->nc_error = ENOENT;
        }
        ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_settimeout(struct namecache *ncp, int nticks)
{
        if ((ncp->nc_timeout = ticks + nticks) == 0)
                ncp->nc_timeout = 1;
}
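/*
 * Resolver-side sketch (illustrative, not part of the original source):
 * a filesystem's nresolve code typically finishes with one of:
 *
 *      cache_setvp(ncp, vp);           (positive hit, nc_error becomes 0)
 *      cache_setvp(ncp, NULL);         (negative hit, nc_error becomes ENOENT)
 */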
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
void
cache_setunresolved(struct namecache *ncp)
{
        struct vnode *vp;

        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                ncp->nc_flag |= NCF_UNRESOLVED;
                ncp->nc_timeout = 0;
                ncp->nc_error = ENOTCONN;
                ++numunres;
                if ((vp = ncp->nc_vp) != NULL) {
                        --numcache;
                        ncp->nc_vp = NULL;
                        TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

                        /*
                         * Any vp associated with an ncp with children is
                         * held by that ncp.  Any vp associated with a locked
                         * ncp is held by that ncp.  These conditions must be
                         * undone when the vp is cleared out from the ncp.
                         */
                        if (ncp->nc_flag & NCF_FSMID)
                                vupdatefsmid(vp);
                        if (!TAILQ_EMPTY(&ncp->nc_list))
                                vdrop(vp);
                        if (ncp->nc_exlocks)
                                vdrop(vp);
                } else {
                        TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                        --numneg;
                }
                ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
                                  NCF_FSMID);
        }
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY         - Set a flag in the passed ncp entry indicating
 *                        that the physical underlying nodes have been
 *                        destroyed... as in deleted.  For example, when
 *                        a directory is removed.  This will cause record
 *                        lookups on the name to no longer be able to find
 *                        the record and tells the resolver to return failure
 *                        rather than trying to resolve through the parent.
 *
 *                        The topology itself, including ncp->nc_name,
 *                        remains intact.
 *
 *                        This only applies to the passed ncp, if CINV_CHILDREN
 *                        is specified the children are not flagged.
 *
 * CINV_CHILDREN        - Set all children (recursively) to an unresolved
 *                        state as well.
 *
 *                        Note that this will also have the side effect of
 *                        cleaning out any unreferenced nodes in the topology
 *                        from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 */
int
cache_inval(struct namecache *ncp, int flags)
{
        struct namecache *kid;
        struct namecache *nextkid;
        int rcnt = 0;

        KKASSERT(ncp->nc_exlocks);

        cache_setunresolved(ncp);
        if (flags & CINV_DESTROY)
                ncp->nc_flag |= NCF_DESTROYED;

        if ((flags & CINV_CHILDREN) &&
            (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
        ) {
                cache_hold(kid);
                cache_unlock(ncp);
                while (kid) {
                        if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
                                cache_hold(nextkid);
                        if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
                            TAILQ_FIRST(&kid->nc_list)
                        ) {
                                cache_lock(kid);
                                rcnt += cache_inval(kid, flags & ~CINV_DESTROY);
                                cache_unlock(kid);
                        }
                        cache_drop(kid);
                        kid = nextkid;
                }
                cache_lock(ncp);
        }

        /*
         * Someone could have gotten in there while ncp was unlocked,
         * retry if so.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
                ++rcnt;
        return (rcnt);
}
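/*
 * Caller sketch (illustrative, not part of the original source): given a
 * held ncp, an rmdir-style operation would typically invalidate the whole
 * subtree and flag the node as destroyed:
 *
 *      cache_lock(ncp);
 *      cache_inval(ncp, CINV_DESTROY | CINV_CHILDREN);
 *      cache_unlock(ncp);
 */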
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
        struct namecache *ncp;
        struct namecache *next;

restart:
        ncp = TAILQ_FIRST(&vp->v_namecache);
        if (ncp)
                cache_hold(ncp);
        while (ncp) {
                /* loop entered with ncp held */
                if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
                        cache_hold(next);
                cache_lock(ncp);
                if (ncp->nc_vp != vp) {
                        printf("Warning: cache_inval_vp: race-A detected on "
                                "%s\n", ncp->nc_name);
                        cache_put(ncp);
                        if (next)
                                cache_drop(next);
                        goto restart;
                }
                cache_inval(ncp, flags);
                cache_put(ncp);         /* also releases reference */
                ncp = next;
                if (ncp && ncp->nc_vp != vp) {
                        printf("Warning: cache_inval_vp: race-B detected on "
                                "%s\n", ncp->nc_name);
                        cache_drop(ncp);
                        goto restart;
                }
        }
        return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target are assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
        struct namecache *scan;
        int didwarn = 0;

        cache_setunresolved(fncp);
        cache_setunresolved(tncp);
        while (cache_inval(tncp, CINV_CHILDREN) != 0) {
                if (didwarn++ % 10 == 0) {
                        printf("Warning: cache_rename: race during "
                                "rename %s->%s\n",
                                fncp->nc_name, tncp->nc_name);
                }
                tsleep(tncp, 0, "mvrace", hz / 10);
                cache_setunresolved(tncp);
        }
        while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
                cache_hold(scan);
                cache_unlink_parent(scan);
                cache_link_parent(scan, tncp);
                if (scan->nc_flag & NCF_HASHED)
                        cache_rehash(scan);
                cache_drop(scan);
        }
}
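/*
 * Caller sketch (illustrative, not part of the original source): a rename
 * implementation moves the namespace and may then re-associate the vnode,
 * per the comment above:
 *
 *      cache_rename(fncp, tncp);
 *      cache_setvp(tncp, vp);          (optional, if the vnode is known)
 */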
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
           int lk_type, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_lock(ncp);
                error = cache_resolve(ncp, cred);
                cache_unlock(ncp);
        } else {
                error = 0;
        }
        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                /*
                 * Accessing the vnode from the namecache is a bit
                 * dangerous.  Because there are no refs on the vnode, it
                 * could be in the middle of a reclaim.
                 */
                if (vp->v_flag & VRECLAIMED) {
                        printf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
                        cache_lock(ncp);
                        cache_setunresolved(ncp);
                        cache_unlock(ncp);
                        goto again;
                }
                error = vget(vp, lk_type, curthread);
                if (error) {
                        if (vp != ncp->nc_vp)
                                goto again;
                        vp = NULL;
                } else if (vp != ncp->nc_vp) {
                        vput(vp);
                        goto again;
                } else if (vp->v_flag & VRECLAIMED) {
                        panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
                }
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}
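/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *      struct vnode *vp;
 *
 *      error = cache_vget(ncp, cred, LK_SHARED, &vp);
 *      if (error == 0) {
 *              ... use the referenced, locked vnode ...
 *              vput(vp);
 *      }
 */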
int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_lock(ncp);
                error = cache_resolve(ncp, cred);
                cache_unlock(ncp);
        } else {
                error = 0;
        }
        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                /*
                 * Since we did not obtain any locks, a cache zap
                 * race can occur here if the vnode is in the middle
                 * of being reclaimed and has not yet been able to
                 * clean out its cache node.  If that case occurs,
                 * we must lock and unresolve the cache, then loop
                 * to retry.
                 */
                if (vp->v_flag & VRECLAIMED) {
                        printf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
                        cache_lock(ncp);
                        cache_setunresolved(ncp);
                        cache_unlock(ncp);
                        goto again;
                }
                vref(vp);
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}
/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct namecache *ncp)
{
        struct vnode *vp;
        struct namecache *scan;

        /*
         * Warning: even if we get a non-NULL vp it could still be in the
         * middle of a recyclement.  Don't do anything fancy, just set
         * NCF_FSMID.
         */
        if ((vp = ncp->nc_vp) != NULL) {
                TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                        for (scan = ncp; scan; scan = scan->nc_parent) {
                                if (scan->nc_flag & NCF_FSMID)
                                        break;
                                scan->nc_flag |= NCF_FSMID;
                        }
                }
        } else {
                while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
                        ncp->nc_flag |= NCF_FSMID;
                        ncp = ncp->nc_parent;
                }
        }
}

void
cache_update_fsmid_vp(struct vnode *vp)
{
        struct namecache *ncp;
        struct namecache *scan;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                for (scan = ncp; scan; scan = scan->nc_parent) {
                        if (scan->nc_flag & NCF_FSMID)
                                break;
                        scan->nc_flag |= NCF_FSMID;
                }
        }
}
/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
        struct namecache *ncp;
        int changed = 0;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (ncp->nc_flag & NCF_FSMID) {
                        ncp->nc_flag &= ~NCF_FSMID;
                        changed = 1;
                }
        }
        if (*fsmid == 0)
                ++*fsmid;
        if (changed)
                ++*fsmid;
        return(changed);
}
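/*
 * Filesystem-side sketch (illustrative, not part of the original source;
 * ip->i_fsmid is a hypothetical per-inode field): a getattr implementation
 * could lazily refresh its fsmid with:
 *
 *      if (cache_check_fsmid_vp(vp, &ip->i_fsmid))
 *              ... mark the inode for a lazy update ...
 *      vap->va_fsmid = ip->i_fsmid;
 */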
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
                                  struct vnode *dvp);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
                                  struct vnode **saved_dvp);

struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
        struct namecache *ncp;
        struct vnode *saved_dvp;
        struct vnode *pvp;
        int error;

        ncp = NULL;
        saved_dvp = NULL;

        /*
         * Temporary debugging code to force the directory scanning code
         * to be exercised.
         */
        if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
                ncp = TAILQ_FIRST(&dvp->v_namecache);
                printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
                goto force;
        }

        /*
         * Loop until resolution, inside code will break out on error.
         */
        while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
                /*
                 * If dvp is the root of its filesystem it should already
                 * have a namecache pointer associated with it as a side
                 * effect of the mount, but it may have been disassociated.
                 */
                if (dvp->v_flag & VROOT) {
                        ncp = cache_get(dvp->v_mount->mnt_ncp);
                        error = cache_resolve_mp(ncp);
                        cache_put(ncp);
                        if (ncvp_debug) {
                                printf("cache_fromdvp: resolve root of mount %p error %d",
                                        dvp->v_mount, error);
                        }
                        if (error) {
                                if (ncvp_debug)
                                        printf(" failed\n");
                                ncp = NULL;
                                break;
                        }
                        if (ncvp_debug)
                                printf(" succeeded\n");
                        continue;
                }

                /*
                 * If we are recursed too deeply resort to an O(n^2)
                 * algorithm to resolve the namecache topology.  The
                 * resolved pvp is left referenced in saved_dvp to
                 * prevent the tree from being destroyed while we loop.
                 */
                if (makeit > 20) {
                        error = cache_fromdvp_try(dvp, cred, &saved_dvp);
                        if (error) {
                                printf("lookupdotdot(longpath) failed %d "
                                       "dvp %p\n", error, dvp);
                                break;
                        }
                        continue;
                }

                /*
                 * Get the parent directory and resolve its ncp.
                 */
                error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
                if (error) {
                        printf("lookupdotdot failed %d dvp %p\n", error, dvp);
                        break;
                }
                VOP_UNLOCK(pvp, 0);

                /*
                 * Reuse makeit as a recursion depth counter.
                 */
                ncp = cache_fromdvp(pvp, cred, makeit + 1);
                vrele(pvp);
                if (ncp == NULL)
                        break;

                /*
                 * Do an inefficient scan of pvp (embodied by ncp) to look
                 * for dvp.  This will create a namecache record for dvp on
                 * success.  We loop up to recheck on success.
                 *
                 * ncp and dvp are both held but not locked.
                 */
                error = cache_inefficient_scan(ncp, cred, dvp);
                cache_drop(ncp);
                if (error) {
                        printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
                                pvp, ncp->nc_name, dvp);
                        ncp = NULL;
                        break;
                }
                if (ncvp_debug) {
                        printf("cache_fromdvp: scan %p (%s) succeeded\n",
                                pvp, ncp->nc_name);
                }
        }
        if (ncp)
                cache_hold(ncp);
        if (saved_dvp)
                vrele(saved_dvp);
        return (ncp);
}
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
                  struct vnode **saved_dvp)
{
        struct namecache *ncp;
        struct vnode *pvp;
        int error;
        static time_t last_fromdvp_report;

        /*
         * Loop getting the parent directory vnode until we get something we
         * can resolve in the namecache.
         */
        vref(dvp);
        for (;;) {
                error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
                if (error) {
                        vrele(dvp);
                        return (error);
                }
                VOP_UNLOCK(pvp, 0);
                if ((ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
                        cache_hold(ncp);
                        vrele(pvp);
                        break;
                }
                if (pvp->v_flag & VROOT) {
                        ncp = cache_get(pvp->v_mount->mnt_ncp);
                        error = cache_resolve_mp(ncp);
                        cache_unlock(ncp);
                        vrele(pvp);
                        if (error) {
                                cache_drop(ncp);
                                vrele(dvp);
                                return (error);
                        }
                        break;
                }
                vrele(dvp);
                dvp = pvp;
        }
        if (last_fromdvp_report != time_second) {
                last_fromdvp_report = time_second;
                printf("Warning: extremely inefficient path resolution on %s\n",
                        ncp->nc_name);
        }
        error = cache_inefficient_scan(ncp, cred, dvp);

        /*
         * Hopefully dvp now has a namecache record associated with it.
         * Leave it referenced to prevent the kernel from recycling the
         * vnode.  Otherwise extremely long directory paths could result
         * in endless recycling.
         */
        if (*saved_dvp)
                vrele(*saved_dvp);
        *saved_dvp = dvp;
        return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to placehold negative lookups
 * and the vnode might not be immediately assigned.  (2) is certainly far
 * less efficient than (1), but since we are only talking about directories
 * here (which are likely to remain cached), the case does not actually run
 * all that often and has the supreme advantage of not polluting the
 * namecache algorithms.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
                       struct vnode *dvp)
{
        struct nlcomponent nlc;
        struct namecache *rncp;
        struct dirent *den;
        struct vnode *pvp;
        struct vattr vat;
        struct iovec iov;
        struct uio uio;
        int blksize;
        int eofflag;
        int bytes;
        char *rbuf;
        int error;

        vat.va_blocksize = 0;
        if ((error = VOP_GETATTR(dvp, &vat, curthread)) != 0)
                return (error);
        if ((error = cache_vget(ncp, cred, LK_SHARED, &pvp)) != 0)
                return (error);
        if (ncvp_debug)
                printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
        if ((blksize = vat.va_blocksize) == 0)
                blksize = DEV_BSIZE;
        rbuf = malloc(blksize, M_TEMP, M_WAITOK);
        rncp = NULL;

        eofflag = 0;
        uio.uio_offset = 0;
again:
        iov.iov_base = rbuf;
        iov.iov_len = blksize;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_resid = blksize;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = curthread;

        if (ncvp_debug >= 2)
                printf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
        error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
        if (error == 0) {
                den = (struct dirent *)rbuf;
                bytes = blksize - uio.uio_resid;

                while (bytes > 0) {
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: %*.*s\n",
                                        den->d_namlen, den->d_namlen,
                                        den->d_name);
                        }
                        if (den->d_type != DT_WHT &&
                            den->d_ino == vat.va_fileid) {
                                if (ncvp_debug) {
                                        printf("cache_inefficient_scan: "
                                               "MATCHED inode %ld path %s/%*.*s\n",
                                               vat.va_fileid, ncp->nc_name,
                                               den->d_namlen, den->d_namlen,
                                               den->d_name);
                                }
                                nlc.nlc_nameptr = den->d_name;
                                nlc.nlc_namelen = den->d_namlen;
                                VOP_UNLOCK(pvp, 0);
                                rncp = cache_nlookup(ncp, &nlc);
                                KKASSERT(rncp != NULL);
                                break;
                        }
                        bytes -= _DIRENT_DIRSIZ(den);
                        den = _DIRENT_NEXT(den);
                }
                if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
                        goto again;
        }
        if (rncp) {
                vrele(pvp);
                if (rncp->nc_flag & NCF_UNRESOLVED) {
                        cache_setvp(rncp, dvp);
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: setvp %s/%s = %p\n",
                                        ncp->nc_name, rncp->nc_name, dvp);
                        }
                } else {
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
                                        ncp->nc_name, rncp->nc_name, dvp,
                                        rncp->nc_vp);
                        }
                }
                if (rncp->nc_vp == NULL)
                        error = rncp->nc_error;
                cache_put(rncp);
        } else {
                printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
                        dvp, ncp->nc_name);
                vput(pvp);
                error = ENOENT;
        }
        free(rbuf, M_TEMP);
        return (error);
}
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
        struct namecache *par;

        /*
         * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
         */
        cache_setunresolved(ncp);

        /*
         * Try to scrap the entry and possibly tail-recurse on its parent.
         * We only scrap unref'd (other than our ref) unresolved entries,
         * we do not scrap 'live' entries.
         */
        while (ncp->nc_flag & NCF_UNRESOLVED) {
                /*
                 * Someone other than us has a ref, stop.
                 */
                if (ncp->nc_refs > 1)
                        goto done;

                /*
                 * We have children, stop.
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        goto done;

                /*
                 * Remove ncp from the topology: hash table and parent linkage.
                 */
                if (ncp->nc_flag & NCF_HASHED) {
                        ncp->nc_flag &= ~NCF_HASHED;
                        LIST_REMOVE(ncp, nc_hash);
                }
                if ((par = ncp->nc_parent) != NULL) {
                        par = cache_hold(par);
                        TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                        ncp->nc_parent = NULL;
                        if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                                vdrop(par->nc_vp);
                }

                /*
                 * ncp should not have picked up any refs.  Physically
                 * destroy the ncp.
                 */
                KKASSERT(ncp->nc_refs == 1);
                --numunres;
                /* cache_unlock(ncp) not required */
                ncp->nc_refs = -1;      /* safety */
                if (ncp->nc_name)
                        free(ncp->nc_name, M_VFSCACHE);
                free(ncp, M_VFSCACHE);

                /*
                 * Loop on the parent (it may be NULL).  Only bother looping
                 * if the parent has a single ref (ours), which also means
                 * we can lock it trivially.
                 */
                ncp = par;
                if (ncp == NULL)
                        return;
                if (ncp->nc_refs != 1) {
                        cache_drop(ncp);
                        return;
                }
                KKASSERT(par->nc_exlocks == 0);
                cache_lock(ncp);
        }
done:
        cache_unlock(ncp);
        --ncp->nc_refs;
}
static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
        /*
         * Don't cache too many negative hits.  We use hysteresis to reduce
         * the impact on the critical path.
         */
        switch(cache_hysteresis_state) {
        case CHI_LOW:
                if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
                        cache_cleanneg(10);
                        cache_hysteresis_state = CHI_HIGH;
                }
                break;
        case CHI_HIGH:
                if (numneg > MINNEG * 9 / 10 &&
                    numneg * ncnegfactor * 9 / 10 > numcache
                ) {
                        cache_cleanneg(10);
                } else {
                        cache_hysteresis_state = CHI_LOW;
                }
                break;
        }
}
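/*
 * Worked example (illustrative, not part of the original source): with the
 * defaults of MINNEG = 1024 and ncnegfactor = 16, cleaning begins in
 * CHI_LOW once numneg exceeds both 1024 and numcache/16, and continues in
 * CHI_HIGH until numneg drops below roughly 90% of those thresholds, so
 * the state machine does not thrash right at the boundary.
 */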
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
        struct namecache *ncp;
        struct namecache *new_ncp;
        struct nchashhead *nchpp;
        u_int32_t hash;
        globaldata_t gd;

        numcalls++;
        gd = mycpu;

        /*
         * Try to locate an existing entry
         */
        hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&par, sizeof(par), hash);
        new_ncp = NULL;
restart:
        LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
                numchecks++;

                /*
                 * Zap entries that have timed out.
                 */
                if (ncp->nc_timeout &&
                    (int)(ncp->nc_timeout - ticks) < 0 &&
                    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
                    ncp->nc_exlocks == 0
                ) {
                        cache_zap(cache_get(ncp));
                        goto restart;
                }

                /*
                 * Break out if we find a matching entry.  Note that
                 * UNRESOLVED entries may match, but DESTROYED entries
                 * do not.
                 */
                if (ncp->nc_parent == par &&
                    ncp->nc_nlen == nlc->nlc_namelen &&
                    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        if (cache_get_nonblock(ncp) == 0) {
                                if (new_ncp)
                                        cache_free(new_ncp);
                                goto found;
                        }
                        cache_get(ncp);
                        cache_put(ncp);
                        goto restart;
                }
        }

        /*
         * We failed to locate an entry, create a new entry and add it to
         * the cache.  We have to relookup after possibly blocking in
         * malloc.
         */
        if (new_ncp == NULL) {
                new_ncp = cache_alloc(nlc->nlc_namelen);
                goto restart;
        }

        ncp = new_ncp;

        /*
         * Initialize as a new UNRESOLVED entry, lock (non-blocking),
         * and link to the parent.  The mount point is usually inherited
         * from the parent unless this is a special case such as a mount
         * point where nlc_namelen is 0.  The caller is responsible for
         * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
         * be NULL.
         */
        if (nlc->nlc_namelen) {
                bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
                ncp->nc_name[nlc->nlc_namelen] = 0;
                ncp->nc_mount = par->nc_mount;
        }
        nchpp = NCHHASH(hash);
        LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
        ncp->nc_flag |= NCF_HASHED;
        cache_link_parent(ncp, par);
found:
        /*
         * stats and namecache size management
         */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                ++gd->gd_nchstats->ncs_miss;
        else if (ncp->nc_vp)
                ++gd->gd_nchstats->ncs_goodhits;
        else
                ++gd->gd_nchstats->ncs_neghits;
        cache_hysteresis();
        return(ncp);
}
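/*
 * Usage sketch (illustrative, not part of the original source): looking up
 * a single path component under a held parent ncp:
 *
 *      struct nlcomponent nlc;
 *
 *      nlc.nlc_nameptr = name;         (hypothetical component string)
 *      nlc.nlc_namelen = namelen;
 *      ncp = cache_nlookup(par, &nlc); (always non-NULL, locked, refd)
 *      if (ncp->nc_flag & NCF_UNRESOLVED)
 *              error = cache_resolve(ncp, cred);
 *      cache_put(ncp);
 */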
/*
 * Given a locked ncp, validate that the vnode, if present, is actually
 * usable.  If it is not usable set the ncp to an unresolved state.
 */
void
cache_validate(struct namecache *ncp)
{
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        cache_setunresolved(ncp);
        }
}
690a3127 1562/*
21739618 1563 * Resolve an unresolved namecache entry, generally by looking it up.
67773eb3 1564 * The passed ncp must be locked and refd.
21739618
MD
1565 *
1566 * Theoretically since a vnode cannot be recycled while held, and since
1567 * the nc_parent chain holds its vnode as long as children exist, the
1568 * direct parent of the cache entry we are trying to resolve should
1569 * have a valid vnode. If not then generate an error that we can
1570 * determine is related to a resolver bug.
fad57d0e 1571 *
9b1b3591
MD
1572 * However, if a vnode was in the middle of a recyclement when the NCP
1573 * got locked, ncp->nc_vp might point to a vnode that is about to become
1574 * invalid. cache_resolve() handles this case by unresolving the entry
1575 * and then re-resolving it.
1576 *
fad57d0e
MD
1577 * Note that successful resolution does not necessarily return an error
1578 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
1579 * will be returned.
690a3127
MD
1580 */
1581int
21739618 1582cache_resolve(struct namecache *ncp, struct ucred *cred)
690a3127 1583{
21739618 1584 struct namecache *par;
67773eb3 1585 int error;
8e005a45 1586
67773eb3 1587restart:
8e005a45 1588 /*
9b1b3591
MD
1589 * If the ncp is already resolved we have nothing to do. However,
1590 * we do want to guarentee that a usable vnode is returned when
1591 * a vnode is present, so make sure it hasn't been reclaimed.
8e005a45 1592 */
9b1b3591
MD
1593 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1594 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1595 cache_setunresolved(ncp);
1596 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1597 return (ncp->nc_error);
1598 }

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp->nc_flag & NCF_MOUNTPT)
		return (cache_resolve_mp(ncp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		printf("EXDEV case 1 %p %*.*s\n", ncp,
		    ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 * - due to filesystem I/O errors.
	 * - due to NFS being stupid about tracking the namespace and
	 *   destroying the namespace for entire directories quite often.
	 * - due to forced unmounts.
	 * - due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur often, and when it does it usually does not have to back
	 * up very many nodes to resolve the ncp.
	 */
	while (ncp->nc_parent->nc_vp == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			printf("EXDEV case 2 %*.*s\n",
			    par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
		    par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		cache_get(par);
		if (par->nc_flag & NCF_MOUNTPT) {
			cache_resolve_mp(par);
		} else if (par->nc_parent->nc_vp == NULL) {
			printf("[diagnostic] cache_resolve: raced on %*.*s\n",
			    par->nc_nlen, par->nc_nlen, par->nc_name);
			cache_put(par);
			continue;
		} else if (par->nc_flag & NCF_UNRESOLVED) {
			par->nc_error = VOP_NRESOLVE(par, cred);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				printf("EXDEV case 3 %*.*s error %d\n",
				    par->nc_nlen, par->nc_nlen, par->nc_name,
				    par->nc_error);
				cache_put(par);
				return(error);
			}
			printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
			    par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
	ncp->nc_error = VOP_NRESOLVE(ncp, cred);
	/*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
	if (ncp->nc_error == EAGAIN) {
		printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
		    ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
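
/*
 * Illustrative resolve pattern (hypothetical caller sketch; the real
 * namei integration lives elsewhere).  The locked, referenced entry
 * comes from a lookup and is resolved in place:
 *
 *	ncp = cache_nlookup(par_ncp, &nlc);
 *	error = cache_resolve(ncp, cred);
 *	// ENOENT here may be a valid negative cache hit rather than a
 *	// hard failure; ncp->nc_error carries the same code.
 */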

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to its Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics of this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct namecache *ncp)
{
	struct vnode *vp;
	struct mount *mp = ncp->nc_mount;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_unlock(ncp);
		while (vfs_busy(mp, 0, curthread))
			;
		error = VFS_ROOT(mp, &vp);
		cache_lock(ncp);

		/*
		 * Recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				cache_setvp(ncp, vp);
				vput(vp);
			} else {
				printf("[diagnostic] cache_resolve_mp: "
				       "failed to resolve mount %p\n", mp);
				cache_setvp(ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp, curthread);
	}
	return(ncp->nc_error);
}
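
/*
 * Locking timeline sketch (commentary added for clarity): the entry is
 * unlocked only around the calls that can block on a dead or slow mount,
 * which is why NCF_UNRESOLVED must be retested after relocking:
 *
 *	cache_unlock(ncp);	// open the race window
 *	vfs_busy(mp, ...);	// may sleep
 *	VFS_ROOT(mp, &vp);	// may sleep
 *	cache_lock(ncp);	// another thread may have resolved ncp
 */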

void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}
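
/*
 * Note (added commentary): each candidate is rotated to the tail of
 * ncneglist before the non-blocking lock attempt, so an entry whose
 * lock cannot be acquired is skipped rather than spun on, and
 * successive calls cycle fairly through the whole negative list.
 */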

/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
		    sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}
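
/*
 * Bucket-selection sketch (illustrative; assumes NCHHASH() reduces the
 * 32-bit FNV-1 value to a chain index as set up in nchinit()).  The
 * name and the parent pointer are both folded in, so identical names
 * under different directories land on different chains:
 *
 *	hash = fnv_32_buf("passwd", 6, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);
 *	nchpp = NCHHASH(hash);
 */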

/*
 * Name cache initialization, called from vfsinit() when we are booting.
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialize per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 1 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
struct namecache *
cache_allocroot(struct mount *mp, struct vnode *vp)
{
	struct namecache *ncp = cache_alloc(0);

	ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
	ncp->nc_mount = mp;
	cache_setvp(ncp, vp);
	return(ncp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
{
	struct vnode *ovp;
	struct namecache *oncp;

	ovp = rootvnode;
	oncp = rootncp;
	rootvnode = nvp;
	rootncp = ncp;

	if (ovp)
		vrele(ovp);
	if (oncp)
		cache_drop(oncp);
}
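
/*
 * Bootstrap sketch (hypothetical; the real sequence lives in
 * start_init()-style code).  The caller is assumed to donate one
 * reference on the vnode and one on the ncp, which the rootvnode and
 * rootncp globals then own:
 *
 *	rncp = cache_allocroot(mp, rootvp);	// referenced, unlocked
 *	vfs_cache_setroot(rootvp, rncp);	// both refs consumed here
 */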

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * A new vnode v_id is generated.  Note that no vnode will ever have a
 * v_id of 0.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 *
 * XXX: Only time and the size of v_id prevent this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnode's v_id individually instead of
 * XXX: using the global v_id.
 */
void
cache_purge(struct vnode *vp)
{
	static u_long nextid;

	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);

	/*
	 * Calculate a new unique id for ".." handling
	 */
	do {
		nextid++;
	} while (nextid == vp->v_id || nextid == 0);
	vp->v_id = nextid;
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				cache_lock(ncp);
				cache_zap(ncp);
			} else {
				cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}
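
/*
 * Note (added commentary): the next hash-chain entry is cache_hold()'d
 * before the current one can be zapped, so the nnp linkage captured by
 * LIST_NEXT() above stays valid even when cache_zap() frees the
 * current entry out from under the scan.
 */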

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
	static int fsmid_roller;
	int64_t fsmid;

	++fsmid_roller;
	fsmid = ((int64_t)time_second << 32) |
		(fsmid_roller & 0x7FFFFFFF);
	return (fsmid);
}
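
/*
 * Layout sketch (added commentary): time_second occupies the high 32
 * bits and the masked roller the low 31 bits, so two calls within the
 * same second still differ.  For example (values illustrative only):
 *
 *	time_second = 0x45000000, fsmid_roller = 5
 *	fsmid       = 0x4500000000000005
 */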

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
int
__getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = malloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	free(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	ncp = fdp->fd_ncdir;
	while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				*error = EBADF;	/* forced unmount? */
				return(NULL);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
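
/*
 * Assembly sketch (added commentary): the path is built from the tail
 * of the buffer toward the head, one component per loop pass.  For a
 * cwd of /usr/src the buffer evolves roughly as:
 *
 *	[..............\0]	terminator at buf + buflen - 1
 *	[..........src\0]	"src" copied in (reverse iteration)
 *	[........./src\0]	'/' prepended
 *	[......usr/src\0]	"usr" copied in
 *	[...../usr/src\0]	'/' prepended; the returned bp points here
 */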

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name) \
	static u_int name; \
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct namecache *ncp, char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct namecache *fd_nrdir;

	numfullpathcalls--;

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = NULL;
	slash_prefixed = 0;
	while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				free(buf, M_TEMP);
				return(EBADF);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numfullpathfail2++;
		free(buf, M_TEMP);
		return(ENOENT);
	}
	if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
		bp = buf + MAXPATHLEN - 1;
		*bp = '\0';
		slash_prefixed = 0;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;

	return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	numfullpathcalls--;
	return(cache_fullpath(p, ncp, retbuf, freebuf));
}
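
/*
 * Usage sketch (hypothetical in-kernel caller): retbuf points into the
 * MAXPATHLEN allocation returned through freebuf, so only freebuf is
 * ever passed to free():
 *
 *	char *path, *fbuf;
 *
 *	if (vn_fullpath(p, vp, &path, &fbuf) == 0) {
 *		printf("vnode path: %s\n", path);
 *		free(fbuf, M_TEMP);
 *	}
 */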