Rename printf -> kprintf in sys/ and add some defines where necessary
[dragonfly.git] / sys / kern / vfs_cache.c

/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.80 2006/12/23 00:35:04 swildner Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024
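
/*
 * Illustrative sketch (not part of this commit): a lookup picks its hash
 * chain by folding the name and then the parent ncp pointer through
 * FNV-1, exactly as cache_nlookup() does below:
 *
 *	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par_ncp, sizeof(par_ncp), hash);
 *	nchpp = NCHHASH(hash);		(head of the chain to search)
 */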

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromdvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;			/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;		/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct mount *mp);
static void _cache_rehash(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
  0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
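
/*
 * Illustrative sketch (user-land, not part of the kernel build): the
 * per-cpu statistics exported above can be pulled with sysctlbyname(3)
 * and aggregated by the reader; the buffer receives one struct nchstats
 * per cpu:
 *
 *	struct nchstats stats[SMP_MAXCPU];
 *	size_t len = sizeof(stats);
 *
 *	if (sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0) == 0) {
 *		int n = len / sizeof(stats[0]);
 *		...sum stats[0..n-1] fields as desired...
 *	}
 */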

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't use one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		_cache_lock(ncp);
		cache_zap(ncp);
	} else {
		atomic_subtract_int(&ncp->nc_refs, 1);
	}
}
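
/*
 * Illustrative sketch (assumed internal caller, not part of this commit):
 * code that must survive a blocking operation brackets it with a
 * hold/drop pair so the entry cannot be freed out from under it:
 *
 *	ncp = _cache_hold(ncp);		(entry pinned)
 *	...potentially blocking work...
 *	_cache_drop(ncp);		(may zap an unresolved last-ref entry)
 */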

/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = _cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		_cache_drop(par);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	/*
	 * Construct a fake FSMID based on the time of day and a 32 bit
	 * roller for uniqueness.  This is used to generate a useful
	 * FSMID for filesystems which do not support it.
	 */
	ncp->nc_fsmid = cache_getnewfsmid();
	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}
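
/*
 * Illustrative sketch (internal creation path, not part of this commit):
 * the allocator returns a locked, single-ref, unresolved entry, so a
 * typical insertion looks like:
 *
 *	ncp = cache_alloc(nlc->nlc_namelen);	(locked, nc_refs == 1)
 *	bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
 *	cache_link_parent(ncp, par);		(enter into the topology)
 */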

static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	++nch->mount->mnt_refs;
	return(nch);
}

void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	_cache_hold(target->ncp);
	++nch->mount->mnt_refs;
}

void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	--nch->mount->mnt_refs;
	nch->mount = mp;
	++nch->mount->mnt_refs;
}

void
cache_drop(struct nchandle *nch)
{
	--nch->mount->mnt_refs;
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * WARNING!  If VRECLAIMED is set the vnode could
			 * already be in the middle of a recycle.  Callers
			 * should not assume that nc_vp is usable when
			 * not NULL.  cache_vref() or cache_vget() must be
			 * called.
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			kprintf("[diagnostic] cache_lock: blocked on %p", ncp);
			kprintf(" \"%*.*s\"\n",
				ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		}
	}

	if (didwarn == 1) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}

static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * WARNING!  If VRECLAIMED is set the vnode could
		 * already be in the middle of a recycle.  Callers
		 * should not assume that nc_vp is usable when
		 * not NULL.  cache_vref() or cache_vget() must be
		 * called.
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}

int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * note: the same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	++target->mount->mnt_refs;
}

static int
_cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		_cache_lock(ncp);
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

int
cache_get_nonblock(struct nchandle *nch)
{
	return(_cache_get_nonblock(nch->ncp));
}

static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	--nch->mount->mnt_refs;
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
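
/*
 * Illustrative sketch (assumed caller, not part of this commit): the
 * canonical consumer pattern pairs cache_get() with cache_put() and
 * resolves the entry if the get left it unresolved:
 *
 *	struct nchandle nch;
 *
 *	cache_get(&some_nch, &nch);		(ref + lock)
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(&nch, cred);
 *	...use nch.ncp->nc_vp via cache_vget()/cache_vref()...
 *	cache_put(&nch);			(unlock + drop)
 */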

/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->ncp, vp);
}

void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
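
/*
 * Illustrative sketch (assumed resolver, not part of this commit): a
 * filesystem's resolve code typically finishes by associating the
 * looked-up vnode (or NULL for a confirmed miss) and, for a remote
 * filesystem such as NFS, giving the entry a finite lifetime:
 *
 *	cache_setvp(nch, vp);			(vp == NULL => negative hit)
 *	cache_settimeout(nch, ttl_ticks);	(ttl_ticks is a hypothetical
 *						 per-mount tunable)
 */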

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				  NCF_FSMID);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}
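
/*
 * Illustrative sketch (assumed caller, not part of this commit): removing
 * a directory typically invalidates the whole subtree and flags the node
 * destroyed so the resolver fails instead of re-resolving through the
 * parent:
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 */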

static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_hold(kid);
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target is assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *scan;
	int didwarn = 0;

	_cache_setunresolved(fncp);
	_cache_setunresolved(tncp);
	while (_cache_inval(tncp, CINV_CHILDREN) != 0) {
		if (didwarn++ % 10 == 0) {
			kprintf("Warning: cache_rename: race during "
				"rename %s->%s\n",
				fncp->nc_name, tncp->nc_name);
		}
		tsleep(tncp, 0, "mvrace", hz / 10);
		_cache_setunresolved(tncp);
	}
	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
		_cache_hold(scan);
		cache_unlink_parent(scan);
		cache_link_parent(scan, tncp);
		if (scan->nc_flag & NCF_HASHED)
			_cache_rehash(scan);
		_cache_drop(scan);
	}
}
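
/*
 * Illustrative sketch (assumed rename path, not part of this commit):
 * after the filesystem commits the rename the topology is moved and the
 * target may be re-associated immediately if its vnode is known:
 *
 *	cache_rename(&fnch, &tnch);
 *	cache_setvp(&tnch, vp);		(vp: the renamed object's vnode)
 */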

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked vnode
 * (depending on the passed lk_type) will be returned in *vpp with an error
 * of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			kprintf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
			_cache_lock(ncp);
			_cache_setunresolved(ncp);
			_cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
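
/*
 * Illustrative sketch (assumed caller, not part of this commit):
 * resolving a held nchandle to a usable, locked vnode:
 *
 *	struct vnode *vp;
 *
 *	if ((error = cache_vget(&nch, cred, LK_EXCLUSIVE, &vp)) == 0) {
 *		...use vp...
 *		vput(vp);		(releases both the lock and the ref)
 *	}
 */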

int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if (vp->v_flag & VRECLAIMED) {
			kprintf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
			_cache_lock(ncp);
			_cache_setunresolved(ncp);
			_cache_unlock(ncp);
			goto again;
		}
		vref_initial(vp, 1);
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct nchandle *nch)
{
	struct namecache *ncp;
	struct namecache *scan;
	struct vnode *vp;

	ncp = nch->ncp;

	/*
	 * Warning: even if we get a non-NULL vp it could still be in the
	 * middle of a recyclement.  Don't do anything fancy, just set
	 * NCF_FSMID.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
			for (scan = ncp; scan; scan = scan->nc_parent) {
				if (scan->nc_flag & NCF_FSMID)
					break;
				scan->nc_flag |= NCF_FSMID;
			}
		}
	} else {
		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
			ncp->nc_flag |= NCF_FSMID;
			ncp = ncp->nc_parent;
		}
	}
}

void
cache_update_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *scan;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		for (scan = ncp; scan; scan = scan->nc_parent) {
			if (scan->nc_flag & NCF_FSMID)
				break;
			scan->nc_flag |= NCF_FSMID;
		}
	}
}

/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
	struct namecache *ncp;
	int changed = 0;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			changed = 1;
		}
	}
	if (*fsmid == 0)
		++*fsmid;
	if (changed)
		++*fsmid;
	return(changed);
}
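
/*
 * Illustrative sketch (assumed getattr path, not part of this commit):
 * a filesystem without native FSMID support can lazily bump its in-core
 * fsmid during getattr; ip here is a hypothetical in-core inode:
 *
 *	cache_check_fsmid_vp(vp, &ip->i_fsmid);
 *	vap->va_fsmid = ip->i_fsmid;
 */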

/*
 * Obtain the FSMID for a vnode for filesystems which do not support
 * a built-in FSMID.
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;

	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			++ncp->nc_fsmid;
		}
		return(ncp->nc_fsmid);
	}
	return(VNOVAL);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
				  struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		kprintf("cache_fromdvp: forcing %s\n", nch->ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((nch->ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp);
		_cache_drop(nch->ncp);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			nch->ncp = NULL;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
	}

	/*
	 * hold it for real so the mount gets a ref
	 */
	if (nch->ncp)
		cache_hold(nch);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}

/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;

	for (;;) {
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			vrele(dvp);
			return (error);
		}
		vn_unlock(pvp);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			vrele(pvp);
			break;
		}
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				vrele(dvp);
				return (error);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (last_fromdvp_report != time_second) {
		last_fromdvp_report = time_second;
		kprintf("Warning: extremely inefficient path resolution on %s\n",
			nch.ncp->nc_name);
	}
	error = cache_inefficient_scan(&nch, cred, dvp);

	/*
	 * Hopefully dvp now has a namecache record associated with it.
	 * Leave it referenced to prevent the kernel from recycling the
	 * vnode.  Otherwise extremely long directory paths could result
	 * in endless recycling.
	 */
	if (*saved_dvp)
		vrele(*saved_dvp);
	*saved_dvp = dvp;
	return (error);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to placehold negative lookups
 * and the vnode might not be immediately assigned.  (2) is certainly far
 * less efficient than (1), but since we are only talking about directories
 * here (which are likely to remain cached), the case does not actually run
 * all that often and has the supreme advantage of not polluting the
 * namecache algorithms.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vref(nch, cred, &pvp)) != 0)
		return (error);
	if (ncvp_debug)
		kprintf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
					       "MATCHED inode %ld path %s/%*.*s\n",
					       vat.va_fileid, nch->ncp->nc_name,
					       den->d_namlen, den->d_namlen,
					       den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		_cache_put(rncp.ncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	kfree(rbuf, M_TEMP);
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = _cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* _cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			kfree(ncp->nc_name, M_VFSCACHE);
		kfree(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			_cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		_cache_lock(ncp);
	}
done:
	_cache_unlock(ncp);
	atomic_subtract_int(&ncp->nc_refs, 1);
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}
1645
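/*
 * Illustrative sketch: the two-state hysteresis above as a standalone
 * toy.  Cleaning trips at the high-water mark and keeps going until the
 * count falls roughly 10% below it, so a load hovering at the limit does
 * not toggle the cleaner on and off on every lookup.  state, limit and
 * clean_some() are hypothetical simplifications of the kernel's
 * numneg/ncnegfactor test.
 */
#if 0
enum { LOW, HIGH };
static int state = LOW;
static void clean_some(void);		/* hypothetical cleaner */

static void
hysteresis(int count, int limit)
{
	switch (state) {
	case LOW:
		if (count > limit) {
			clean_some();
			state = HIGH;	/* high-water mark reached */
		}
		break;
	case HIGH:
		if (count > limit * 9 / 10)
			clean_some();	/* clean down to ~90% of limit */
		else
			state = LOW;	/* re-arm the trigger */
		break;
	}
}
#endif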
14c92d03
MD
1646/*
1647 * NEW NAMECACHE LOOKUP API
1648 *
1649 * Lookup an entry in the cache. A locked, referenced, non-NULL
1650 * entry is *always* returned, even if the supplied component is illegal.
fad57d0e 1651 * The resulting namecache entry should be returned to the system with
28623bf9 1652 * cache_put() or cache_unlock() + cache_drop().
14c92d03
MD
1653 *
1654 * namecache locks are recursive but care must be taken to avoid lock order
1655 * reversals.
1656 *
1657 * Nobody else will be able to manipulate the associated namespace (e.g.
1658 * create, delete, rename, rename-target) until the caller unlocks the
1659 * entry.
1660 *
1661 * The returned entry will be in one of three states: positive hit (non-null
1662 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
1663 * Unresolved entries must be resolved through the filesystem to associate the
1664 * vnode and/or determine whether a positive or negative hit has occurred.
1665 *
1666 * It is not necessary to lock a directory in order to lock the namespace under
1667 * that directory. In fact, it is explicitly not allowed to do that. A
1668 * directory is typically only locked when being created, renamed, or
1669 * destroyed.
1670 *
1671 * The directory (par) may be unresolved, in which case any returned child
1672 * will likely also be marked unresolved. Likely but not guaranteed. Since
fad57d0e
MD
1673 * the filesystem lookup requires a resolved directory vnode the caller is
1674 * responsible for resolving the namecache chain top-down. This API
14c92d03
MD
1675 * specifically allows whole chains to be created in an unresolved state.
1676 */
28623bf9
MD
1677struct nchandle
1678cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
14c92d03 1679{
28623bf9 1680 struct nchandle nch;
690a3127
MD
1681 struct namecache *ncp;
1682 struct namecache *new_ncp;
1683 struct nchashhead *nchpp;
1684 u_int32_t hash;
1685 globaldata_t gd;
1686
1687 numcalls++;
1688 gd = mycpu;
1689
690a3127
MD
1690 /*
1691 * Try to locate an existing entry
1692 */
1693 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
28623bf9 1694 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
690a3127
MD
1695 new_ncp = NULL;
1696restart:
1697 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1698 numchecks++;
1699
1700 /*
1701 * Zap entries that have timed out.
1702 */
1703 if (ncp->nc_timeout &&
67773eb3
MD
1704 (int)(ncp->nc_timeout - ticks) < 0 &&
1705 (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
1706 ncp->nc_exlocks == 0
690a3127 1707 ) {
28623bf9 1708 cache_zap(_cache_get(ncp));
690a3127
MD
1709 goto restart;
1710 }
1711
1712 /*
1713 * Break out if we find a matching entry. Note that
e09206ba
MD
1714 * UNRESOLVED entries may match, but DESTROYED entries
1715 * do not.
690a3127 1716 */
28623bf9 1717 if (ncp->nc_parent == par_nch->ncp &&
690a3127 1718 ncp->nc_nlen == nlc->nlc_namelen &&
e09206ba
MD
1719 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
1720 (ncp->nc_flag & NCF_DESTROYED) == 0
690a3127 1721 ) {
28623bf9 1722 if (_cache_get_nonblock(ncp) == 0) {
67773eb3 1723 if (new_ncp)
28623bf9 1724 _cache_free(new_ncp);
67773eb3
MD
1725 goto found;
1726 }
28623bf9
MD
1727 _cache_get(ncp);
1728 _cache_put(ncp);
67773eb3 1729 goto restart;
690a3127
MD
1730 }
1731 }
1732
1733 /*
1734 * We failed to locate an entry, create a new entry and add it to
1735 * the cache. We have to relookup after possibly blocking in
1736 * malloc.
1737 */
1738 if (new_ncp == NULL) {
524c845c 1739 new_ncp = cache_alloc(nlc->nlc_namelen);
690a3127
MD
1740 goto restart;
1741 }
1742
1743 ncp = new_ncp;
1744
1745 /*
1746 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
fad57d0e
MD
1747 * and link to the parent. The mount point is usually inherited
1748 * from the parent unless this is a special case such as a mount
28623bf9 1749 * point where nlc_namelen is 0. If nlc_namelen is 0 nc_name will
fad57d0e 1750 * be NULL.
690a3127 1751 */
4fcb1cf7
MD
1752 if (nlc->nlc_namelen) {
1753 bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
fad57d0e 1754 ncp->nc_name[nlc->nlc_namelen] = 0;
4fcb1cf7 1755 }
690a3127
MD
1756 nchpp = NCHHASH(hash);
1757 LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
1758 ncp->nc_flag |= NCF_HASHED;
28623bf9 1759 cache_link_parent(ncp, par_nch->ncp);
690a3127 1760found:
fad57d0e
MD
1761 /*
1762 * stats and namecache size management
1763 */
1764 if (ncp->nc_flag & NCF_UNRESOLVED)
1765 ++gd->gd_nchstats->ncs_miss;
1766 else if (ncp->nc_vp)
1767 ++gd->gd_nchstats->ncs_goodhits;
1768 else
1769 ++gd->gd_nchstats->ncs_neghits;
62d0f1f0 1770 cache_hysteresis();
28623bf9
MD
1771 nch.mount = par_nch->mount;
1772 nch.ncp = ncp;
1773 ++nch.mount->mnt_refs;
1774 return(nch);
690a3127
MD
1775}
1776
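/*
 * Illustrative sketch: how the lookup above (and _cache_rehash() further
 * down) derive a hash bucket.  The name bytes are folded through 32-bit
 * FNV-1 and the running value is then chained over the parent pointer
 * itself, so identical names under different directories land in
 * different chains.  fnv1_32() is a local stand-in for the kernel's
 * fnv_32_buf(); the constants are the standard FNV-1 values.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define FNV1_32_INIT	0x811c9dc5U
#define FNV_32_PRIME	0x01000193U

static uint32_t
fnv1_32(const void *buf, size_t len, uint32_t hval)
{
	const unsigned char *p = buf;

	while (len--) {
		hval *= FNV_32_PRIME;	/* FNV-1: multiply, then xor */
		hval ^= *p++;
	}
	return (hval);
}

static uint32_t
bucket_hash(const char *name, size_t namelen, const void *parent,
	    uint32_t bucket_mask)
{
	uint32_t hash;

	hash = fnv1_32(name, namelen, FNV1_32_INIT);
	hash = fnv1_32(&parent, sizeof(parent), hash);
	return (hash & bucket_mask);	/* NCHHASH()-style masking */
}
#endif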
1d505369 1777/*
28623bf9
MD
1778 * The namecache entry is marked as being used as a mount point.
1779 * Locate the mount if it is visible to the caller.
1d505369 1780 */
28623bf9
MD
1781struct findmount_info {
1782 struct mount *result;
1783 struct mount *nch_mount;
1784 struct namecache *nch_ncp;
1785};
1786
1787static
1788int
1789cache_findmount_callback(struct mount *mp, void *data)
1d505369 1790{
28623bf9 1791 struct findmount_info *info = data;
1d505369 1792
28623bf9
MD
1793 /*
1794 * Check the mount's mounted-on point against the passed nch.
1795 */
1796 if (mp->mnt_ncmounton.mount == info->nch_mount &&
1797 mp->mnt_ncmounton.ncp == info->nch_ncp
1798 ) {
1799 info->result = mp;
1800 return(-1);
1d505369 1801 }
28623bf9 1802 return(0);
1d505369
MD
1803}
1804
28623bf9
MD
1805struct mount *
1806cache_findmount(struct nchandle *nch)
9b1b3591 1807{
28623bf9
MD
1808 struct findmount_info info;
1809
1810 info.result = NULL;
1811 info.nch_mount = nch->mount;
1812 info.nch_ncp = nch->ncp;
1813 mountlist_scan(cache_findmount_callback, &info,
1814 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1815 return(info.result);
9b1b3591
MD
1816}
1817
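/*
 * Illustrative sketch: the scan-with-callback pattern cache_findmount()
 * uses above.  The iterator walks while the callback returns 0 and stops
 * early on a negative return; the answer travels back through the opaque
 * data pointer.  list_scan(), struct item and find_cb() are hypothetical
 * stand-ins for mountlist_scan(), struct mount and the callback above.
 */
#if 0
#include <stddef.h>

struct item {
	struct item	*next;
	int		key;
};

struct find_info {
	struct item	*result;
	int		want;
};

static int
list_scan(struct item *head, int (*cb)(struct item *, void *), void *data)
{
	struct item *ip;
	int r = 0;

	for (ip = head; ip; ip = ip->next) {
		if ((r = cb(ip, data)) != 0)
			break;			/* callback ends the scan */
	}
	return (r);
}

static int
find_cb(struct item *ip, void *data)
{
	struct find_info *info = data;

	if (ip->key == info->want) {
		info->result = ip;
		return (-1);			/* found it, stop */
	}
	return (0);
}
#endif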
690a3127 1818/*
21739618 1819 * Resolve an unresolved namecache entry, generally by looking it up.
67773eb3 1820 * The passed ncp must be locked and refd.
21739618
MD
1821 *
1822 * Theoretically since a vnode cannot be recycled while held, and since
1823 * the nc_parent chain holds its vnode as long as children exist, the
1824 * direct parent of the cache entry we are trying to resolve should
1825 * have a valid vnode. If not, generate an error that we can
1826 * determine is related to a resolver bug.
fad57d0e 1827 *
9b1b3591
MD
1828 * However, if a vnode was in the middle of a recyclement when the NCP
1829 * got locked, ncp->nc_vp might point to a vnode that is about to become
1830 * invalid. cache_resolve() handles this case by unresolving the entry
1831 * and then re-resolving it.
1832 *
fad57d0e
MD
1833 * Note that successful resolution does not necessarily return an error
1834 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
1835 * will be returned.
690a3127
MD
1836 */
1837int
28623bf9 1838cache_resolve(struct nchandle *nch, struct ucred *cred)
690a3127 1839{
21739618 1840 struct namecache *par;
28623bf9
MD
1841 struct namecache *ncp;
1842 struct nchandle nctmp;
1843 struct mount *mp;
67773eb3 1844 int error;
8e005a45 1845
28623bf9
MD
1846 ncp = nch->ncp;
1847 mp = nch->mount;
67773eb3 1848restart:
8e005a45 1849 /*
9b1b3591
MD
1850 * If the ncp is already resolved we have nothing to do. However,
1851 * we do want to guarantee that a usable vnode is returned when
1852 * a vnode is present, so make sure it hasn't been reclaimed.
8e005a45 1853 */
9b1b3591
MD
1854 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1855 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
28623bf9 1856 _cache_setunresolved(ncp);
9b1b3591
MD
1857 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1858 return (ncp->nc_error);
1859 }
21739618 1860
646a1cda
MD
1861 /*
1862 * Mount points need special handling because the parent does not
1863 * belong to the same filesystem as the ncp.
1864 */
28623bf9
MD
1865 if (ncp == mp->mnt_ncmountpt.ncp)
1866 return (cache_resolve_mp(mp));
646a1cda
MD
1867
1868 /*
1869 * We expect an unbroken chain of ncps to at least the mount point,
1870 * and even all the way to root (but this code doesn't have to go
1871 * past the mount point).
1872 */
1873 if (ncp->nc_parent == NULL) {
6ea70f76 1874 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
646a1cda 1875 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
21739618 1876 ncp->nc_error = EXDEV;
646a1cda
MD
1877 return(ncp->nc_error);
1878 }
1879
1880 /*
1881 * The vp's of the parent directories in the chain are held via vhold()
1882 * due to the existence of the child, and should not disappear.
1883 * However, there are cases where they can disappear:
1884 *
1885 * - due to filesystem I/O errors.
1886 * - due to NFS being stupid about tracking the namespace and
1887 * destroying the namespace for entire directories quite often.
1888 * - due to forced unmounts.
e09206ba 1889 * - due to an rmdir (parent will be marked DESTROYED)
646a1cda
MD
1890 *
1891 * When this occurs we have to track the chain backwards and resolve
1892 * it, looping until the resolver catches up to the current node. We
1893 * could recurse here but we might run ourselves out of kernel stack
1894 * so we do it in a more painful manner. This situation really should
1895 * not occur all that often, and when it does it should not have to
1896 * go back too many nodes to resolve the ncp.
1897 */
1898 while (ncp->nc_parent->nc_vp == NULL) {
e09206ba
MD
1899 /*
1900 * This case can occur if a process is CD'd into a
1901 * directory which is then rmdir'd. If the parent is marked
1902 * destroyed there is no point trying to resolve it.
1903 */
1904 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
1905 return(ENOENT);
1906
646a1cda
MD
1907 par = ncp->nc_parent;
1908 while (par->nc_parent && par->nc_parent->nc_vp == NULL)
1909 par = par->nc_parent;
1910 if (par->nc_parent == NULL) {
6ea70f76 1911 kprintf("EXDEV case 2 %*.*s\n",
646a1cda
MD
1912 par->nc_nlen, par->nc_nlen, par->nc_name);
1913 return (EXDEV);
1914 }
6ea70f76 1915 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
646a1cda
MD
1916 par->nc_nlen, par->nc_nlen, par->nc_name);
1917 /*
67773eb3
MD
1918 * The parent is not set in stone, ref and lock it to prevent
1919 * it from disappearing. Also note that due to renames it
1920 * is possible for our ncp to move and for par to no longer
1921 * be one of its parents. We resolve it anyway; the loop
1922 * will handle any moves.
646a1cda 1923 */
28623bf9
MD
1924 _cache_get(par);
1925 if (par == nch->mount->mnt_ncmountpt.ncp) {
1926 cache_resolve_mp(nch->mount);
8e005a45 1927 } else if (par->nc_parent->nc_vp == NULL) {
6ea70f76 1928 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
28623bf9 1929 _cache_put(par);
8e005a45 1930 continue;
fad57d0e 1931 } else if (par->nc_flag & NCF_UNRESOLVED) {
28623bf9
MD
1932 nctmp.mount = mp;
1933 nctmp.ncp = par;
1934 par->nc_error = VOP_NRESOLVE(&nctmp, cred);
646a1cda 1935 }
67773eb3
MD
1936 if ((error = par->nc_error) != 0) {
1937 if (par->nc_error != EAGAIN) {
6ea70f76 1938 kprintf("EXDEV case 3 %*.*s error %d\n",
67773eb3
MD
1939 par->nc_nlen, par->nc_nlen, par->nc_name,
1940 par->nc_error);
28623bf9 1941 _cache_put(par);
67773eb3
MD
1942 return(error);
1943 }
6ea70f76 1944 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
67773eb3 1945 par, par->nc_nlen, par->nc_nlen, par->nc_name);
646a1cda 1946 }
28623bf9 1947 _cache_put(par);
67773eb3 1948 /* loop */
646a1cda 1949 }
8e005a45
MD
1950
1951 /*
fad57d0e 1952 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
8e005a45
MD
1953 * ncp's and reattach them. If this occurs the original ncp is marked
1954 * EAGAIN to force a relookup.
fad57d0e
MD
1955 *
1956 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
1957 * ncp must already be resolved.
8e005a45 1958 */
28623bf9
MD
1959 nctmp.mount = mp;
1960 nctmp.ncp = ncp;
1961 ncp->nc_error = VOP_NRESOLVE(&nctmp, cred);
6ddb7618 1962 /*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
67773eb3 1963 if (ncp->nc_error == EAGAIN) {
6ea70f76 1964 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
67773eb3
MD
1965 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
1966 goto restart;
1967 }
646a1cda
MD
1968 return(ncp->nc_error);
1969}
1970
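/*
 * Illustrative sketch: the catch-up loop of cache_resolve() above,
 * boiled down.  While our node's parent is broken we walk back to the
 * deepest ancestor whose own parent is still good, fix that single
 * node, and loop; the resolver catches up to us one level at a time
 * without recursion.  struct pnode and resolve_one() are hypothetical.
 */
#if 0
#include <errno.h>

struct pnode {
	struct pnode	*parent;
	int		resolved;
};

static int resolve_one(struct pnode *np);	/* hypothetical resolver */

static int
resolve_chain(struct pnode *np)
{
	struct pnode *par;
	int error;

	while (np->parent != NULL && !np->parent->resolved) {
		par = np->parent;
		while (par->parent && !par->parent->resolved)
			par = par->parent;	/* deepest broken ancestor */
		if (par->parent == NULL)
			return (EXDEV);		/* chain broke entirely */
		if ((error = resolve_one(par)) != 0)
			return (error);
		/* loop: the resolver is now one level closer to np */
	}
	return (resolve_one(np));
}
#endif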
1971/*
1972 * Resolve the ncp associated with a mount point. Such ncp's almost always
1973 * remain resolved and this routine is rarely called. NFS MPs tend to force
1974 * re-resolution more often due to NFS's mack-truck-smash-the-namecache
1975 * method of tracking namespace changes.
1976 *
6215aa92
MD
1977 * The semantics for this call are that the passed ncp must be locked on
1978 * entry and will be locked on return. However, if we actually have to
1979 * resolve the mount point we temporarily unlock the entry in order to
1980 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
1981 * the unlock we have to recheck the flags after we relock.
646a1cda
MD
1982 */
1983static int
28623bf9 1984cache_resolve_mp(struct mount *mp)
646a1cda 1985{
28623bf9 1986 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
646a1cda 1987 struct vnode *vp;
6215aa92 1988 int error;
646a1cda
MD
1989
1990 KKASSERT(mp != NULL);
9b1b3591
MD
1991
1992 /*
1993 * If the ncp is already resolved we have nothing to do. However,
1994 * we do want to guarantee that a usable vnode is returned when
1995 * a vnode is present, so make sure it hasn't been reclaimed.
1996 */
1997 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1998 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
28623bf9 1999 _cache_setunresolved(ncp);
9b1b3591
MD
2000 }
2001
646a1cda 2002 if (ncp->nc_flag & NCF_UNRESOLVED) {
28623bf9 2003 _cache_unlock(ncp);
f9642f56 2004 while (vfs_busy(mp, 0))
646a1cda 2005 ;
6215aa92 2006 error = VFS_ROOT(mp, &vp);
28623bf9 2007 _cache_lock(ncp);
6215aa92
MD
2008
2009 /*
2010 * recheck the ncp state after relocking.
2011 */
2012 if (ncp->nc_flag & NCF_UNRESOLVED) {
2013 ncp->nc_error = error;
2014 if (error == 0) {
28623bf9 2015 _cache_setvp(ncp, vp);
6215aa92
MD
2016 vput(vp);
2017 } else {
6ea70f76 2018 kprintf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
28623bf9 2019 _cache_setvp(ncp, NULL);
6215aa92
MD
2020 }
2021 } else if (error == 0) {
646a1cda 2022 vput(vp);
646a1cda 2023 }
f9642f56 2024 vfs_unbusy(mp);
21739618
MD
2025 }
2026 return(ncp->nc_error);
14c92d03
MD
2027}
2028
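/*
 * Illustrative sketch: the unlock/relock-and-recheck pattern used by
 * cache_resolve_mp() above.  Because the lock is dropped around the
 * blocking operation, another thread may have finished the work in the
 * meantime, so the state must be re-tested after relocking before the
 * result is installed.  lock(), unlock() and slow_compute() are
 * hypothetical.
 */
#if 0
struct obj {
	int	valid;
	int	value;
};

static void lock(struct obj *);
static void unlock(struct obj *);
static int slow_compute(void);		/* may block for a long time */

static void
fill_once(struct obj *op)
{
	int v;

	lock(op);
	if (!op->valid) {
		unlock(op);		/* never block while holding it */
		v = slow_compute();
		lock(op);
		if (!op->valid) {	/* recheck: we may have raced */
			op->value = v;
			op->valid = 1;
		}
	}
	unlock(op);
}
#endif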
62d0f1f0
MD
2029void
2030cache_cleanneg(int count)
2031{
2032 struct namecache *ncp;
7ea21ed1
MD
2033
2034 /*
62d0f1f0
MD
2035 * Automode from the vnlru proc - clean out 10% of the negative cache
2036 * entries.
7ea21ed1 2037 */
62d0f1f0
MD
2038 if (count == 0)
2039 count = numneg / 10 + 1;
2040
2041 /*
2042 * Attempt to clean out the specified number of negative cache
2043 * entries.
2044 */
2045 while (count) {
7ea21ed1 2046 ncp = TAILQ_FIRST(&ncneglist);
eb82ae62
MD
2047 if (ncp == NULL) {
2048 KKASSERT(numneg == 0);
2049 break;
2050 }
62d0f1f0
MD
2051 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
2052 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
28623bf9 2053 if (_cache_get_nonblock(ncp) == 0)
67773eb3 2054 cache_zap(ncp);
62d0f1f0 2055 --count;
984263bc
MD
2056 }
2057}
2058
fad57d0e
MD
2059/*
2060 * Rehash a ncp. Rehashing is typically required if the name changes (should
2061 * not generally occur) or the parent link changes. This function will
2062 * unhash the ncp if the ncp is no longer hashable.
2063 */
8c361dda 2064static void
28623bf9 2065_cache_rehash(struct namecache *ncp)
8c361dda
MD
2066{
2067 struct nchashhead *nchpp;
2068 u_int32_t hash;
2069
2070 if (ncp->nc_flag & NCF_HASHED) {
2071 ncp->nc_flag &= ~NCF_HASHED;
2072 LIST_REMOVE(ncp, nc_hash);
2073 }
fad57d0e
MD
2074 if (ncp->nc_nlen && ncp->nc_parent) {
2075 hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
2076 hash = fnv_32_buf(&ncp->nc_parent,
2077 sizeof(ncp->nc_parent), hash);
2078 nchpp = NCHHASH(hash);
2079 LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
2080 ncp->nc_flag |= NCF_HASHED;
2081 }
8c361dda
MD
2082}
2083
984263bc 2084/*
24e51f36 2085 * Name cache initialization, from vfsinit() when we are booting
984263bc
MD
2086 */
2087void
8987aad7 2088nchinit(void)
984263bc 2089{
24e51f36
HP
2090 int i;
2091 globaldata_t gd;
2092
2093 /* initialise per-cpu namecache effectiveness statistics. */
2094 for (i = 0; i < ncpus; ++i) {
2095 gd = globaldata_find(i);
2096 gd->gd_nchstats = &nchstats[i];
2097 }
7ea21ed1 2098 TAILQ_INIT(&ncneglist);
984263bc 2099 nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
fc21741a 2100 nclockwarn = 1 * hz;
21739618
MD
2101}
2102
2103/*
2104 * Called from start_init() to bootstrap the root filesystem. Returns
2105 * a referenced, unlocked namecache record.
2106 */
28623bf9
MD
2107void
2108cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
21739618 2109{
28623bf9
MD
2110 nch->ncp = cache_alloc(0);
2111 nch->mount = mp;
2112 ++mp->mnt_refs;
2113 if (vp)
2114 _cache_setvp(nch->ncp, vp);
984263bc
MD
2115}
2116
2117/*
7ea21ed1 2118 * vfs_cache_setroot()
984263bc 2119 *
7ea21ed1
MD
2120 * Create an association between the root of our namecache and
2121 * the root vnode. This routine may be called several times during
2122 * booting.
690a3127
MD
2123 *
2124 * If the caller intends to save the returned namecache pointer somewhere
2125 * it must cache_hold() it.
7ea21ed1 2126 */
21739618 2127void
28623bf9 2128vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
7ea21ed1 2129{
21739618 2130 struct vnode *ovp;
28623bf9 2131 struct nchandle onch;
21739618
MD
2132
2133 ovp = rootvnode;
28623bf9 2134 onch = rootnch;
21739618 2135 rootvnode = nvp;
28623bf9
MD
2136 if (nch)
2137 rootnch = *nch;
2138 else
2139 cache_zero(&rootnch);
21739618
MD
2140 if (ovp)
2141 vrele(ovp);
28623bf9
MD
2142 if (onch.ncp)
2143 cache_drop(&onch);
7ea21ed1
MD
2144}
2145
2146/*
fad57d0e
MD
2147 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
2148 * topology and is being removed as quickly as possible. The new VOP_N*()
2149 * API calls are required to make specific adjustments using the supplied
2150 * ncp pointers rather than just bogusly purging random vnodes.
2151 *
7ea21ed1
MD
2152 * Invalidate all namecache entries to a particular vnode as well as
2153 * any direct children of that vnode in the namecache. This is a
2154 * 'catch all' purge used by filesystems that do not know any better.
2155 *
7ea21ed1
MD
2156 * Note that the linkage between the vnode and its namecache entries will
2157 * be removed, but the namecache entries themselves might stay put due to
2158 * active references from elsewhere in the system or due to the existence of
2159 * the children. The namecache topology is left intact even if we do not
2160 * know what the vnode association is. Such entries will be marked
2161 * NCF_UNRESOLVED.
984263bc 2162 */
984263bc 2163void
8987aad7 2164cache_purge(struct vnode *vp)
984263bc 2165{
6b008938 2166 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
984263bc
MD
2167}
2168
2169/*
2170 * Flush all entries referencing a particular filesystem.
2171 *
2172 * Since we need to check it anyway, we will flush all the invalid
2173 * entries at the same time.
2174 */
28623bf9
MD
2175#if 0
2176
984263bc 2177void
8987aad7 2178cache_purgevfs(struct mount *mp)
984263bc 2179{
bc0c094e 2180 struct nchashhead *nchpp;
984263bc
MD
2181 struct namecache *ncp, *nnp;
2182
7ea21ed1
MD
2183 /*
2184 * Scan hash tables for applicable entries.
2185 */
bc0c094e
MD
2186 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
2187 ncp = LIST_FIRST(nchpp);
7ea21ed1 2188 if (ncp)
28623bf9 2189 _cache_hold(ncp);
7ea21ed1 2190 while (ncp) {
984263bc 2191 nnp = LIST_NEXT(ncp, nc_hash);
7ea21ed1 2192 if (nnp)
28623bf9 2193 _cache_hold(nnp);
4fcb1cf7 2194 if (ncp->nc_mount == mp) {
28623bf9 2195 _cache_lock(ncp);
984263bc 2196 cache_zap(ncp);
67773eb3 2197 } else {
28623bf9 2198 _cache_drop(ncp);
67773eb3 2199 }
7ea21ed1 2200 ncp = nnp;
984263bc
MD
2201 }
2202 }
2203}
2204
28623bf9
MD
2205#endif
2206
6b008938
MD
2207/*
2208 * Create a new (theoretically) unique fsmid
2209 */
2210int64_t
2211cache_getnewfsmid(void)
2212{
2213 static int fsmid_roller;
2214 int64_t fsmid;
2215
2216 ++fsmid_roller;
2217 fsmid = ((int64_t)time_second << 32) |
2218 (fsmid_roller & 0x7FFFFFFF);
2219 return (fsmid);
2220}
2221
2222
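/*
 * Illustrative sketch: the fsmid layout produced above, in user space
 * with time(NULL) standing in for time_second.  The high 32 bits carry
 * the second and the low 31 bits a rolling counter, so ids minted
 * within the same second still differ.
 */
#if 0
#include <stdint.h>
#include <time.h>

static int64_t
getnewid(void)
{
	static int roller;

	++roller;
	return (((int64_t)time(NULL) << 32) | (roller & 0x7FFFFFFF));
}
#endif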
984263bc
MD
2223static int disablecwd;
2224SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");
2225
2226static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
2227static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
2228static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
2229static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
2230static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
2231static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
41c20dac 2232
984263bc 2233int
753fd850 2234sys___getcwd(struct __getcwd_args *uap)
63f58b90 2235{
02680f1b 2236 int buflen;
63f58b90 2237 int error;
02680f1b
MD
2238 char *buf;
2239 char *bp;
2240
2241 if (disablecwd)
2242 return (ENODEV);
2243
2244 buflen = uap->buflen;
2245 if (buflen < 2)
2246 return (EINVAL);
2247 if (buflen > MAXPATHLEN)
2248 buflen = MAXPATHLEN;
63f58b90 2249
efda3bd0 2250 buf = kmalloc(buflen, M_TEMP, M_WAITOK);
02680f1b 2251 bp = kern_getcwd(buf, buflen, &error);
63f58b90 2252 if (error == 0)
02680f1b 2253 error = copyout(bp, uap->buf, strlen(bp) + 1);
efda3bd0 2254 kfree(buf, M_TEMP);
63f58b90
EN
2255 return (error);
2256}
2257
02680f1b
MD
2258char *
2259kern_getcwd(char *buf, size_t buflen, int *error)
984263bc 2260{
41c20dac 2261 struct proc *p = curproc;
63f58b90 2262 char *bp;
02680f1b 2263 int i, slash_prefixed;
984263bc 2264 struct filedesc *fdp;
28623bf9 2265 struct nchandle nch;
984263bc
MD
2266
2267 numcwdcalls++;
63f58b90
EN
2268 bp = buf;
2269 bp += buflen - 1;
984263bc
MD
2270 *bp = '\0';
2271 fdp = p->p_fd;
2272 slash_prefixed = 0;
524c845c 2273
28623bf9
MD
2274 nch = fdp->fd_ncdir;
2275 while (nch.ncp && (nch.ncp != fdp->fd_nrdir.ncp ||
2276 nch.mount != fdp->fd_nrdir.mount)
2277 ) {
2278 /*
2279 * While traversing upwards if we encounter the root
2280 * of the current mount we have to skip to the mount point
2281 * in the underlying filesystem.
2282 */
2283 if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
2284 nch = nch.mount->mnt_ncmounton;
984263bc
MD
2285 continue;
2286 }
28623bf9
MD
2287
2288 /*
2289 * Prepend the path segment
2290 */
2291 for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
984263bc
MD
2292 if (bp == buf) {
2293 numcwdfail4++;
02680f1b
MD
2294 *error = ENOMEM;
2295 return(NULL);
984263bc 2296 }
28623bf9 2297 *--bp = nch.ncp->nc_name[i];
984263bc
MD
2298 }
2299 if (bp == buf) {
2300 numcwdfail4++;
02680f1b
MD
2301 *error = ENOMEM;
2302 return(NULL);
984263bc
MD
2303 }
2304 *--bp = '/';
2305 slash_prefixed = 1;
28623bf9
MD
2306
2307 /*
2308 * Go up a directory. This isn't a mount point so we don't
2309 * have to check again.
2310 */
2311 nch.ncp = nch.ncp->nc_parent;
524c845c 2312 }
28623bf9 2313 if (nch.ncp == NULL) {
524c845c
MD
2314 numcwdfail2++;
2315 *error = ENOENT;
2316 return(NULL);
984263bc
MD
2317 }
2318 if (!slash_prefixed) {
2319 if (bp == buf) {
2320 numcwdfail4++;
02680f1b
MD
2321 *error = ENOMEM;
2322 return(NULL);
984263bc
MD
2323 }
2324 *--bp = '/';
2325 }
2326 numcwdfound++;
02680f1b
MD
2327 *error = 0;
2328 return (bp);
984263bc
MD
2329}
2330
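/*
 * Illustrative sketch: the backwards buffer technique used by
 * kern_getcwd() above and cache_fullpath() below.  Components are
 * copied in from the *end* of the buffer while walking parent links,
 * so no reversal pass or length pre-computation is needed; callers use
 * the returned pointer, not the buffer start.  struct dent is a
 * hypothetical stand-in for the nc_parent chain.
 */
#if 0
#include <stddef.h>

struct dent {
	struct dent	*parent;	/* NULL at the root */
	const char	*name;		/* one component, no '/' */
	size_t		namelen;
};

static char *
build_path(struct dent *dp, char *buf, size_t buflen)
{
	char *bp = buf + buflen - 1;
	size_t i;

	*bp = '\0';
	for (; dp != NULL && dp->parent != NULL; dp = dp->parent) {
		for (i = dp->namelen; i > 0; ) {
			if (bp == buf)
				return (NULL);	/* out of buffer: ENOMEM */
			*--bp = dp->name[--i];
		}
		if (bp == buf)
			return (NULL);
		*--bp = '/';			/* prepend the separator */
	}
	if (*bp == '\0') {			/* we were at the root */
		if (bp == buf)
			return (NULL);
		*--bp = '/';
	}
	return (bp);				/* points into buf's tail */
}
#endif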
2331/*
2332 * Thus begins the fullpath magic.
2333 */
2334
2335#undef STATNODE
2336#define STATNODE(name) \
2337 static u_int name; \
2338 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
2339
2340static int disablefullpath;
2341SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
2342 &disablefullpath, 0, "");
2343
2344STATNODE(numfullpathcalls);
2345STATNODE(numfullpathfail1);
2346STATNODE(numfullpathfail2);
2347STATNODE(numfullpathfail3);
2348STATNODE(numfullpathfail4);
2349STATNODE(numfullpathfound);
2350
2351int
28623bf9 2352cache_fullpath(struct proc *p, struct nchandle *nchp, char **retbuf, char **freebuf)
8987aad7 2353{
984263bc
MD
2354 char *bp, *buf;
2355 int i, slash_prefixed;
28623bf9
MD
2356 struct nchandle fd_nrdir;
2357 struct nchandle nch;
984263bc 2358
b6372d22 2359 numfullpathcalls--;
b310dfc4 2360
28623bf9
MD
2361 *retbuf = NULL;
2362 *freebuf = NULL;
2363
efda3bd0 2364 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
984263bc
MD
2365 bp = buf + MAXPATHLEN - 1;
2366 *bp = '\0';
75ffff0d
JS
2367 if (p != NULL)
2368 fd_nrdir = p->p_fd->fd_nrdir;
2369 else
28623bf9 2370 fd_nrdir = rootnch;
984263bc 2371 slash_prefixed = 0;
28623bf9
MD
2372 nch = *nchp;
2373
2374 while (nch.ncp &&
2375 (nch.ncp != fd_nrdir.ncp || nch.mount != fd_nrdir.mount)
2376 ) {
2377 /*
2378 * While traversing upwards if we encounter the root
2379 * of the current mount we have to skip to the mount point.
2380 */
2381 if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
2382 nch = nch.mount->mnt_ncmounton;
984263bc
MD
2383 continue;
2384 }
28623bf9
MD
2385
2386 /*
2387 * Prepend the path segment
2388 */
2389 for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
984263bc
MD
2390 if (bp == buf) {
2391 numfullpathfail4++;
efda3bd0 2392 kfree(buf, M_TEMP);
b6372d22 2393 return(ENOMEM);
984263bc 2394 }
28623bf9 2395 *--bp = nch.ncp->nc_name[i];
984263bc
MD
2396 }
2397 if (bp == buf) {
2398 numfullpathfail4++;
efda3bd0 2399 kfree(buf, M_TEMP);
b6372d22 2400 return(ENOMEM);
984263bc
MD
2401 }
2402 *--bp = '/';
2403 slash_prefixed = 1;
28623bf9
MD
2404
2405 /*
2406 * Go up a directory. This isn't a mount point so we don't
2407 * have to check again.
2408 */
2409 nch.ncp = nch.ncp->nc_parent;
524c845c 2410 }
28623bf9 2411 if (nch.ncp == NULL) {
524c845c 2412 numfullpathfail2++;
efda3bd0 2413 kfree(buf, M_TEMP);
b6372d22 2414 return(ENOENT);
984263bc 2415 }
28623bf9 2416
984263bc
MD
2417 if (!slash_prefixed) {
2418 if (bp == buf) {
2419 numfullpathfail4++;
efda3bd0 2420 kfree(buf, M_TEMP);
b6372d22 2421 return(ENOMEM);
984263bc
MD
2422 }
2423 *--bp = '/';
2424 }
2425 numfullpathfound++;
2426 *retbuf = bp;
b310dfc4 2427 *freebuf = buf;
6a506bad
JS
2428
2429 return(0);
984263bc 2430}
8987aad7 2431
b6372d22
JS
2432int
2433vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
2434{
b6372d22 2435 struct namecache *ncp;
28623bf9 2436 struct nchandle nch;
b6372d22
JS
2437
2438 numfullpathcalls++;
2439 if (disablefullpath)
2440 return (ENODEV);
2441
2442 if (p == NULL)
2443 return (EINVAL);
2444
2445 /* vn is NULL, client wants us to use p->p_textvp */
2446 if (vn == NULL) {
2447 if ((vn = p->p_textvp) == NULL)
2448 return (EINVAL);
2449 }
2450 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
2451 if (ncp->nc_nlen)
2452 break;
2453 }
2454 if (ncp == NULL)
2455 return (EINVAL);
2456
2457 numfullpathcalls--;
28623bf9
MD
2458 nch.ncp = ncp;
2459 nch.mount = vn->v_mount;
2460 return(cache_fullpath(p, &nch, retbuf, freebuf));
b6372d22 2461}