/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.48 2005/01/31 18:11:06 joerg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)   (&nchashtbl[(hash) & nchash])
#define MINNEG          1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;     /* Hash Table */
static struct namecache_list ncneglist;                 /* instead of vnode */

/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0    Only errors are reported
 * 1    Successes are reported
 * 2    Successes + the whole directory scan is reported
 * 3    Force the directory scan code run as if the parent vnode did not
 *      have a namecache record, even if it does have one.
 */
static int ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long nchash;                   /* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long ncnegfactor = 16;         /* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int nclockwarn;                  /* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long numneg;                   /* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long numcache;                 /* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long numunres;                 /* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
        SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
        struct globaldata *gd;
        int i, error;

        error = 0;
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
                    sizeof(struct nchstats))))
                        break;
        }

        return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
        ++ncp->nc_refs;
        return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
        KKASSERT(ncp->nc_refs > 0);
        if (ncp->nc_refs == 1 &&
            (ncp->nc_flag & NCF_UNRESOLVED) &&
            TAILQ_EMPTY(&ncp->nc_list)
        ) {
                KKASSERT(ncp->nc_exlocks == 0);
                cache_lock(ncp);
                cache_zap(ncp);
        } else {
                --ncp->nc_refs;
        }
}

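/*
 * Illustrative sketch (not compiled): a consumer that only needs to keep
 * an entry from being deleted holds it across the operation; the hold does
 * not lock the namespace.  do_something_blocking() is a placeholder:
 *
 *      ncp = cache_hold(ncp);
 *      error = do_something_blocking(ncp);
 *      cache_drop(ncp);
 */
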
/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 *
 * If we are creating a child under an oldapi parent we must mark the
 * child as being an oldapi entry as well.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
        KKASSERT(ncp->nc_parent == NULL);
        ncp->nc_parent = par;
        if (TAILQ_EMPTY(&par->nc_list)) {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
                /*
                 * Any vp associated with an ncp which has children must
                 * be held to prevent it from being recycled.
                 */
                if (par->nc_vp)
                        vhold(par->nc_vp);
        } else {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
        }
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
        struct namecache *par;

        if ((par = ncp->nc_parent) != NULL) {
                ncp->nc_parent = NULL;
                par = cache_hold(par);
                TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                        vdrop(par->nc_vp);
                cache_drop(par);
        }
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
        struct namecache *ncp;

        ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
        if (nlen)
                ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
        ncp->nc_nlen = nlen;
        ncp->nc_flag = NCF_UNRESOLVED;
        ncp->nc_error = ENOTCONN;       /* needs to be resolved */
        ncp->nc_refs = 1;
        TAILQ_INIT(&ncp->nc_list);
        cache_lock(ncp);
        return(ncp);
}

static void
cache_free(struct namecache *ncp)
{
        KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
        if (ncp->nc_name)
                free(ncp->nc_name, M_VFSCACHE);
        free(ncp, M_VFSCACHE);
}

/*
 * Ref and deref a namecache structure.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
        return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
        _cache_drop(ncp);
}

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * In particular, if a vnode is associated with a locked cache entry
 * that vnode will *NOT* be recycled.  We accomplish this by vhold()ing the
 * vnode.  XXX we should find a more efficient way to prevent the vnode
 * from being recycled, but remember that any given vnode may have multiple
 * namecache associations (think hardlinks).
 */
void
cache_lock(struct namecache *ncp)
{
        thread_t td;
        int didwarn;

        KKASSERT(ncp->nc_refs != 0);
        didwarn = 0;
        td = curthread;

        for (;;) {
                if (ncp->nc_exlocks == 0) {
                        ncp->nc_exlocks = 1;
                        ncp->nc_locktd = td;
                        /*
                         * The vp associated with a locked ncp must be held
                         * to prevent it from being recycled (which would
                         * cause the ncp to become unresolved).
                         *
                         * XXX loop on race for later MPSAFE work.
                         */
                        if (ncp->nc_vp)
                                vhold(ncp->nc_vp);
                        break;
                }
                if (ncp->nc_locktd == td) {
                        ++ncp->nc_exlocks;
                        break;
                }
                ncp->nc_flag |= NCF_LOCKREQ;
                if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
                        if (didwarn)
                                continue;
                        didwarn = 1;
                        printf("[diagnostic] cache_lock: blocked on %p", ncp);
                        if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
                                printf(" [MOUNTPT %s]\n",
                                    ncp->nc_mount->mnt_stat.f_mntonname);
                        else
                                printf(" \"%*.*s\"\n",
                                    ncp->nc_nlen, ncp->nc_nlen,
                                    ncp->nc_name);
                }
        }

        if (didwarn == 1) {
                printf("[diagnostic] cache_lock: unblocked %*.*s\n",
                    ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
        }
}

int
cache_lock_nonblock(struct namecache *ncp)
{
        thread_t td;

        KKASSERT(ncp->nc_refs != 0);
        td = curthread;
        if (ncp->nc_exlocks == 0) {
                ncp->nc_exlocks = 1;
                ncp->nc_locktd = td;
                /*
                 * The vp associated with a locked ncp must be held
                 * to prevent it from being recycled (which would
                 * cause the ncp to become unresolved).
                 *
                 * XXX loop on race for later MPSAFE work.
                 */
                if (ncp->nc_vp)
                        vhold(ncp->nc_vp);
                return(0);
        } else {
                return(EWOULDBLOCK);
        }
}

void
cache_unlock(struct namecache *ncp)
{
        thread_t td = curthread;

        KKASSERT(ncp->nc_refs > 0);
        KKASSERT(ncp->nc_exlocks > 0);
        KKASSERT(ncp->nc_locktd == td);
        if (--ncp->nc_exlocks == 0) {
                if (ncp->nc_vp)
                        vdrop(ncp->nc_vp);
                ncp->nc_locktd = NULL;
                if (ncp->nc_flag & NCF_LOCKREQ) {
                        ncp->nc_flag &= ~NCF_LOCKREQ;
                        wakeup(ncp);
                }
        }
}

/*
 * ref-and-lock, unlock-and-deref functions.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
        _cache_hold(ncp);
        cache_lock(ncp);
        return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
        /* XXX MP */
        if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
                _cache_hold(ncp);
                cache_lock(ncp);
                return(0);
        }
        return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
        cache_unlock(ncp);
        _cache_drop(ncp);
}

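/*
 * Illustrative sketch (not compiled): cache_get()/cache_put() bracket a
 * namespace operation, combining the reference with the namespace lock:
 *
 *      ncp = cache_get(ncp);           // ref + exclusive namespace lock
 *      if (ncp->nc_flag & NCF_UNRESOLVED)
 *              error = cache_resolve(ncp, cred);
 *      cache_put(ncp);                 // unlock + drop
 */
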
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
        KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
        ncp->nc_vp = vp;
        if (vp != NULL) {
                /*
                 * Any vp associated with an ncp which has children must
                 * be held.  Any vp associated with a locked ncp must be held.
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        vhold(vp);
                TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
                if (ncp->nc_exlocks)
                        vhold(vp);

                /*
                 * Set auxiliary flags
                 */
                switch(vp->v_type) {
                case VDIR:
                        ncp->nc_flag |= NCF_ISDIR;
                        break;
                case VLNK:
                        ncp->nc_flag |= NCF_ISSYMLINK;
                        /* XXX cache the contents of the symlink */
                        break;
                default:
                        break;
                }
                ++numcache;
                ncp->nc_error = 0;
        } else {
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                ++numneg;
                ncp->nc_error = ENOENT;
        }
        ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_settimeout(struct namecache *ncp, int nticks)
{
        if ((ncp->nc_timeout = ticks + nticks) == 0)
                ncp->nc_timeout = 1;
}

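/*
 * Illustrative sketch (not compiled): a resolver finishes by associating
 * a vnode, or records a negative hit by passing NULL:
 *
 *      if (vp != NULL)
 *              cache_setvp(ncp, vp);   // positive entry, nc_error = 0
 *      else
 *              cache_setvp(ncp, NULL); // negative entry, nc_error = ENOENT
 */
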
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
void
cache_setunresolved(struct namecache *ncp)
{
        struct vnode *vp;

        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                ncp->nc_flag |= NCF_UNRESOLVED;
                ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
                ncp->nc_timeout = 0;
                ncp->nc_error = ENOTCONN;
                ++numunres;
                if ((vp = ncp->nc_vp) != NULL) {
                        --numcache;
                        ncp->nc_vp = NULL;
                        TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

                        /*
                         * Any vp associated with an ncp with children is
                         * held by that ncp.  Any vp associated with a locked
                         * ncp is held by that ncp.  These conditions must be
                         * undone when the vp is cleared out from the ncp.
                         */
                        if (!TAILQ_EMPTY(&ncp->nc_list))
                                vdrop(vp);
                        if (ncp->nc_exlocks)
                                vdrop(vp);
                } else {
                        TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                        --numneg;
                }
        }
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY         - Set a flag in the passed ncp entry indicating
 *                        that the physical underlying nodes have been
 *                        destroyed... as in deleted.  For example, when
 *                        a directory is removed.  This will cause record
 *                        lookups on the name to no longer be able to find
 *                        the record and tells the resolver to return failure
 *                        rather than trying to resolve through the parent.
 *
 *                        The topology itself, including ncp->nc_name,
 *                        remains intact.
 *
 *                        This only applies to the passed ncp, if CINV_CHILDREN
 *                        is specified the children are not flagged.
 *
 * CINV_CHILDREN        - Set all children (recursively) to an unresolved
 *                        state as well.
 *
 *                        Note that this will also have the side effect of
 *                        cleaning out any unreferenced nodes in the topology
 *                        from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 */
void
cache_inval(struct namecache *ncp, int flags)
{
        struct namecache *kid;
        struct namecache *nextkid;

        KKASSERT(ncp->nc_exlocks);
again:
        cache_setunresolved(ncp);
        if (flags & CINV_DESTROY)
                ncp->nc_flag |= NCF_DESTROYED;

        if ((flags & CINV_CHILDREN) &&
            (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
        ) {
                cache_hold(kid);
                cache_unlock(ncp);
                while (kid) {
                        if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
                                cache_hold(nextkid);
                        if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
                            TAILQ_FIRST(&kid->nc_list)
                        ) {
                                cache_lock(kid);
                                cache_inval(kid, flags & ~CINV_DESTROY);
                                cache_unlock(kid);
                        }
                        cache_drop(kid);
                        kid = nextkid;
                }
                cache_lock(ncp);

                /*
                 * Someone could have gotten in there while ncp was unlocked,
                 * retry if so.
                 */
                if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
                        goto again;
        }
}

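/*
 * Illustrative sketch (not compiled): removing a directory invalidates
 * the entry and its subtree; cache_purge() below uses the same flags:
 *
 *      cache_inval(ncp, CINV_DESTROY | CINV_CHILDREN);
 */
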
/*
 * Invalidate a vnode's namecache associations.
 */
void
cache_inval_vp(struct vnode *vp, int flags)
{
        struct namecache *ncp;

        while ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
                cache_get(ncp);
                cache_inval(ncp, flags);
                cache_put(ncp);
        }
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target are assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
        struct namecache *scan;

        cache_setunresolved(fncp);
        cache_setunresolved(tncp);
        cache_inval(tncp, CINV_CHILDREN);
        while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
                cache_hold(scan);
                cache_unlink_parent(scan);
                cache_link_parent(scan, tncp);
                if (scan->nc_flag & NCF_HASHED)
                        cache_rehash(scan);
                cache_drop(scan);
        }
}

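/*
 * Illustrative sketch (not compiled): a rename implementation, holding
 * both entries locked, moves the topology and may then reassociate the
 * vnode it already knows about:
 *
 *      cache_rename(fncp, tncp);
 *      cache_setvp(tncp, vp);          // optional, if vp is known
 */
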
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
           int lk_type, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_lock(ncp);
                error = cache_resolve(ncp, cred);
                cache_unlock(ncp);
        } else {
                error = 0;
        }
        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                error = vget(vp, lk_type, curthread);
                if (error) {
                        if (vp != ncp->nc_vp)   /* handle cache_zap race */
                                goto again;
                        vp = NULL;
                } else if (vp != ncp->nc_vp) {  /* handle cache_zap race */
                        vput(vp);
                        goto again;
                }
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}

int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_lock(ncp);
                error = cache_resolve(ncp, cred);
                cache_unlock(ncp);
        } else {
                error = 0;
        }
        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                vref(vp);
                if (vp != ncp->nc_vp) {         /* handle cache_zap race */
                        vrele(vp);
                        goto again;
                }
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}

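/*
 * Illustrative sketch (not compiled): turning a held ncp into a usable
 * vnode.  cache_vget() returns a ref'd, possibly locked vnode depending
 * on lk_type; cache_vref() returns a ref-only vnode.  ENOENT means the
 * entry resolved to a negative cache hit:
 *
 *      error = cache_vget(ncp, cred, LK_SHARED, &vp);
 *      if (error == 0) {
 *              ...operate on vp...
 *              vput(vp);
 *      }
 */
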
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
                                  struct vnode *dvp);

struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
        struct namecache *ncp;
        struct vnode *pvp;
        int error;

        /*
         * Temporary debugging code to force the directory scanning code
         * to be exercised.
         */
        ncp = NULL;
        if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
                ncp = TAILQ_FIRST(&dvp->v_namecache);
                printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
                goto force;
        }

        /*
         * Loop until resolution, inside code will break out on error.
         */
        while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
                /*
                 * If dvp is the root of its filesystem it should already
                 * have a namecache pointer associated with it as a side
                 * effect of the mount, but it may have been disassociated.
                 */
                if (dvp->v_flag & VROOT) {
                        ncp = cache_get(dvp->v_mount->mnt_ncp);
                        error = cache_resolve_mp(ncp);
                        cache_put(ncp);
                        if (ncvp_debug) {
                                printf("cache_fromdvp: resolve root of mount %p error %d",
                                    dvp->v_mount, error);
                        }
                        if (error) {
                                if (ncvp_debug)
                                        printf(" failed\n");
                                ncp = NULL;
                                break;
                        }
                        if (ncvp_debug)
                                printf(" succeeded\n");
                        continue;
                }

                /*
                 * Get the parent directory and resolve its ncp.
                 */
                error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
                if (error) {
                        printf("lookupdotdot failed %d %p\n", error, pvp);
                        break;
                }
                VOP_UNLOCK(pvp, 0, curthread);

                /*
                 * XXX this recursion could run the kernel out of stack,
                 * change to a less efficient algorithm if we get too deep
                 * (use 'makeit' for a depth counter?)
                 */
                ncp = cache_fromdvp(pvp, cred, makeit);
                vrele(pvp);
                if (ncp == NULL)
                        break;

                /*
                 * Do an inefficient scan of pvp (embodied by ncp) to look
                 * for dvp.  This will create a namecache record for dvp on
                 * success.  We loop up to recheck on success.
                 *
                 * ncp and dvp are both held but not locked.
                 */
                error = cache_inefficient_scan(ncp, cred, dvp);
                cache_drop(ncp);
                if (error) {
                        printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
                            pvp, ncp->nc_name, dvp);
                        ncp = NULL;
                        break;
                }
                if (ncvp_debug) {
                        printf("cache_fromdvp: scan %p (%s) succeeded\n",
                            pvp, ncp->nc_name);
                }
        }
        if (ncp)
                cache_hold(ncp);
        return (ncp);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
                       struct vnode *dvp)
{
        struct nlcomponent nlc;
        struct namecache *rncp;
        struct dirent *den;
        struct vnode *pvp;
        struct vattr vat;
        struct iovec iov;
        struct uio uio;
        u_long *cookies;
        off_t baseoff;
        int ncookies;
        int blksize;
        int eofflag;
        char *rbuf;
        int error;
        int xoff;
        int i;

        vat.va_blocksize = 0;
        if ((error = VOP_GETATTR(dvp, &vat, curthread)) != 0)
                return (error);
        if ((error = cache_vget(ncp, cred, LK_SHARED, &pvp)) != 0)
                return (error);
        if (ncvp_debug)
                printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n",
                    vat.va_blocksize, (long)vat.va_fileid);
        if ((blksize = vat.va_blocksize) == 0)
                blksize = DEV_BSIZE;
        rbuf = malloc(blksize, M_TEMP, M_WAITOK);
        rncp = NULL;

        eofflag = 0;
        uio.uio_offset = 0;
        cookies = NULL;
again:
        baseoff = uio.uio_offset;
        iov.iov_base = rbuf;
        iov.iov_len = blksize;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_resid = blksize;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = curthread;

        if (cookies) {
                free(cookies, M_TEMP);
                cookies = NULL;
        }
        if (ncvp_debug >= 2)
                printf("cache_inefficient_scan: readdir @ %08x\n", (int)baseoff);
        error = VOP_READDIR(pvp, &uio, cred, &eofflag, &ncookies, &cookies);
        if (error == 0 && cookies == NULL)
                error = EPERM;
        if (error == 0) {
                for (i = 0; i < ncookies; ++i) {
                        xoff = (int)(cookies[i] - (u_long)baseoff);
                        /*
                         * UFS plays a little trick to skip the first entry
                         * in a directory ("."), by assigning the cookie to
                         * dpoff + dp->d_reclen in the loop.  This causes
                         * the last cookie to be assigned to the data-end of
                         * the directory.  XXX
                         */
                        if (xoff == blksize)
                                break;
                        KKASSERT(xoff >= 0 && xoff <= blksize);
                        den = (struct dirent *)(rbuf + xoff);
                        if (ncvp_debug >= 2)
                                printf("cache_inefficient_scan: %*.*s\n",
                                    den->d_namlen, den->d_namlen, den->d_name);
                        if (den->d_type != DT_WHT &&
                            den->d_fileno == vat.va_fileid) {
                                if (ncvp_debug)
                                        printf("cache_inefficient_scan: MATCHED inode %ld path %s/%*.*s\n",
                                            vat.va_fileid, ncp->nc_name,
                                            den->d_namlen, den->d_namlen,
                                            den->d_name);
                                nlc.nlc_nameptr = den->d_name;
                                nlc.nlc_namelen = den->d_namlen;
                                VOP_UNLOCK(pvp, 0, curthread);
                                rncp = cache_nlookup(ncp, &nlc);
                                KKASSERT(rncp != NULL);
                                break;
                        }
                }
                if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
                        goto again;
        }
        if (cookies) {
                free(cookies, M_TEMP);
                cookies = NULL;
        }
        if (rncp) {
                vrele(pvp);
                if (rncp->nc_flag & NCF_UNRESOLVED) {
                        cache_setvp(rncp, dvp);
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: setvp %s/%s = %p\n",
                                    ncp->nc_name, rncp->nc_name, dvp);
                        }
                } else {
                        if (ncvp_debug >= 2) {
                                printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
                                    ncp->nc_name, rncp->nc_name, dvp,
                                    rncp->nc_vp);
                        }
                }
                if (rncp->nc_vp == NULL)
                        error = rncp->nc_error;
                cache_put(rncp);
        } else {
                printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
                    dvp, ncp->nc_name);
                vput(pvp);
                error = ENOENT;
        }
        free(rbuf, M_TEMP);
        return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
        struct namecache *par;

        /*
         * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
         */
        cache_setunresolved(ncp);

        /*
         * Try to scrap the entry and possibly tail-recurse on its parent.
         * We only scrap unref'd (other than our ref) unresolved entries,
         * we do not scrap 'live' entries.
         */
        while (ncp->nc_flag & NCF_UNRESOLVED) {
                /*
                 * Someone other than us has a ref, stop.
                 */
                if (ncp->nc_refs > 1)
                        goto done;

                /*
                 * We have children, stop.
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        goto done;

                /*
                 * Remove ncp from the topology: hash table and parent linkage.
                 */
                if (ncp->nc_flag & NCF_HASHED) {
                        ncp->nc_flag &= ~NCF_HASHED;
                        LIST_REMOVE(ncp, nc_hash);
                }
                if ((par = ncp->nc_parent) != NULL) {
                        par = cache_hold(par);
                        TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                        ncp->nc_parent = NULL;
                        if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                                vdrop(par->nc_vp);
                }

                /*
                 * ncp should not have picked up any refs.  Physically
                 * destroy the ncp.
                 */
                KKASSERT(ncp->nc_refs == 1);
                --numunres;
                /* cache_unlock(ncp) not required */
                ncp->nc_refs = -1;      /* safety */
                if (ncp->nc_name)
                        free(ncp->nc_name, M_VFSCACHE);
                free(ncp, M_VFSCACHE);

                /*
                 * Loop on the parent (it may be NULL).  Only bother looping
                 * if the parent has a single ref (ours), which also means
                 * we can lock it trivially.
                 */
                ncp = par;
                if (ncp == NULL)
                        return;
                if (ncp->nc_refs != 1) {
                        cache_drop(ncp);
                        return;
                }
                KKASSERT(par->nc_exlocks == 0);
                cache_lock(ncp);
        }
done:
        cache_unlock(ncp);
        --ncp->nc_refs;
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
        /*
         * Don't cache too many negative hits.  We use hysteresis to reduce
         * the impact on the critical path.
         */
        switch(cache_hysteresis_state) {
        case CHI_LOW:
                if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
                        cache_cleanneg(10);
                        cache_hysteresis_state = CHI_HIGH;
                }
                break;
        case CHI_HIGH:
                if (numneg > MINNEG * 9 / 10 &&
                    numneg * ncnegfactor * 9 / 10 > numcache
                ) {
                        cache_cleanneg(10);
                } else {
                        cache_hysteresis_state = CHI_LOW;
                }
                break;
        }
}

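/*
 * Worked example of the thresholds above (illustrative): with MINNEG 1024,
 * CHI_LOW begins reaping (10 entries per call) once numneg exceeds 1024
 * and negative entries outnumber 1/ncnegfactor (default 1/16th) of the
 * cache.  In CHI_HIGH the MINNEG test drops to 9/10 of that (921), so the
 * state machine does not flap between cleaning and not cleaning right at
 * the boundary.
 */
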
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
        struct namecache *ncp;
        struct namecache *new_ncp;
        struct nchashhead *nchpp;
        u_int32_t hash;
        globaldata_t gd;

        numcalls++;
        gd = mycpu;

        /*
         * Try to locate an existing entry
         */
        hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&par, sizeof(par), hash);
        new_ncp = NULL;
restart:
        LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
                numchecks++;

                /*
                 * Zap entries that have timed out.
                 */
                if (ncp->nc_timeout &&
                    (int)(ncp->nc_timeout - ticks) < 0 &&
                    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
                    ncp->nc_exlocks == 0
                ) {
                        cache_zap(cache_get(ncp));
                        goto restart;
                }

                /*
                 * Break out if we find a matching entry.  Note that
                 * UNRESOLVED entries may match, but DESTROYED entries
                 * do not.
                 */
                if (ncp->nc_parent == par &&
                    ncp->nc_nlen == nlc->nlc_namelen &&
                    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        if (cache_get_nonblock(ncp) == 0) {
                                if (new_ncp)
                                        cache_free(new_ncp);
                                goto found;
                        }
                        cache_get(ncp);
                        cache_put(ncp);
                        goto restart;
                }
        }

        /*
         * We failed to locate an entry, create a new entry and add it to
         * the cache.  We have to relookup after possibly blocking in
         * malloc.
         */
        if (new_ncp == NULL) {
                new_ncp = cache_alloc(nlc->nlc_namelen);
                goto restart;
        }

        ncp = new_ncp;

        /*
         * Initialize as a new UNRESOLVED entry, lock (non-blocking),
         * and link to the parent.  The mount point is usually inherited
         * from the parent unless this is a special case such as a mount
         * point where nlc_namelen is 0.  The caller is responsible for
         * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
         * be NULL.
         */
        if (nlc->nlc_namelen) {
                bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
                ncp->nc_name[nlc->nlc_namelen] = 0;
                ncp->nc_mount = par->nc_mount;
        }
        nchpp = NCHHASH(hash);
        LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
        ncp->nc_flag |= NCF_HASHED;
        cache_link_parent(ncp, par);
found:
        /*
         * stats and namecache size management
         */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                ++gd->gd_nchstats->ncs_miss;
        else if (ncp->nc_vp)
                ++gd->gd_nchstats->ncs_goodhits;
        else
                ++gd->gd_nchstats->ncs_neghits;
        cache_hysteresis();
        return(ncp);
}

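/*
 * Illustrative sketch (not compiled): the shape of a path-component
 * lookup.  nlc_nameptr/nlc_namelen come from the caller's path parser:
 *
 *      nlc.nlc_nameptr = name;
 *      nlc.nlc_namelen = namelen;
 *      ncp = cache_nlookup(par, &nlc);         // locked + ref'd, never NULL
 *      if (ncp->nc_flag & NCF_UNRESOLVED)
 *              error = cache_resolve(ncp, cred);
 *      cache_put(ncp);                         // unlock + drop when done
 */
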
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct namecache *ncp, struct ucred *cred)
{
        struct namecache *par;
        int error;

restart:
        /*
         * If the ncp is already resolved we have nothing to do.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
                return (ncp->nc_error);

        /*
         * Mount points need special handling because the parent does not
         * belong to the same filesystem as the ncp.
         */
        if (ncp->nc_flag & NCF_MOUNTPT)
                return (cache_resolve_mp(ncp));

        /*
         * We expect an unbroken chain of ncps to at least the mount point,
         * and even all the way to root (but this code doesn't have to go
         * past the mount point).
         */
        if (ncp->nc_parent == NULL) {
                printf("EXDEV case 1 %p %*.*s\n", ncp,
                    ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
                ncp->nc_error = EXDEV;
                return(ncp->nc_error);
        }

        /*
         * The vp's of the parent directories in the chain are held via vhold()
         * due to the existence of the child, and should not disappear.
         * However, there are cases where they can disappear:
         *
         *      - due to filesystem I/O errors.
         *      - due to NFS being stupid about tracking the namespace and
         *        destroying the namespace for entire directories quite often.
         *      - due to forced unmounts.
         *      - due to an rmdir (parent will be marked DESTROYED)
         *
         * When this occurs we have to track the chain backwards and resolve
         * it, looping until the resolver catches up to the current node.  We
         * could recurse here but we might run ourselves out of kernel stack
         * so we do it in a more painful manner.  This situation really should
         * not occur all that often, or if it does not have to go back too
         * many nodes to resolve the ncp.
         */
        while (ncp->nc_parent->nc_vp == NULL) {
                /*
                 * This case can occur if a process is CD'd into a
                 * directory which is then rmdir'd.  If the parent is marked
                 * destroyed there is no point trying to resolve it.
                 */
                if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
                        return(ENOENT);

                par = ncp->nc_parent;
                while (par->nc_parent && par->nc_parent->nc_vp == NULL)
                        par = par->nc_parent;
                if (par->nc_parent == NULL) {
                        printf("EXDEV case 2 %*.*s\n",
                            par->nc_nlen, par->nc_nlen, par->nc_name);
                        return (EXDEV);
                }
                printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
                    par->nc_nlen, par->nc_nlen, par->nc_name);
                /*
                 * The parent is not set in stone, ref and lock it to prevent
                 * it from disappearing.  Also note that due to renames it
                 * is possible for our ncp to move and for par to no longer
                 * be one of its parents.  We resolve it anyway, the loop
                 * will handle any moves.
                 */
                cache_get(par);
                if (par->nc_flag & NCF_MOUNTPT) {
                        cache_resolve_mp(par);
                } else if (par->nc_parent->nc_vp == NULL) {
                        printf("[diagnostic] cache_resolve: raced on %*.*s\n",
                            par->nc_nlen, par->nc_nlen, par->nc_name);
                        cache_put(par);
                        continue;
                } else if (par->nc_flag & NCF_UNRESOLVED) {
                        par->nc_error = VOP_NRESOLVE(par, cred);
                }
                if ((error = par->nc_error) != 0) {
                        if (par->nc_error != EAGAIN) {
                                printf("EXDEV case 3 %*.*s error %d\n",
                                    par->nc_nlen, par->nc_nlen, par->nc_name,
                                    par->nc_error);
                                cache_put(par);
                                return(error);
                        }
                        printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
                            par, par->nc_nlen, par->nc_nlen, par->nc_name);
                }
                cache_put(par);
                /* loop */
        }

        /*
         * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
         * ncp's and reattach them.  If this occurs the original ncp is marked
         * EAGAIN to force a relookup.
         *
         * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
         * ncp must already be resolved.
         */
        KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
        ncp->nc_error = VOP_NRESOLVE(ncp, cred);
        /*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
        if (ncp->nc_error == EAGAIN) {
                printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
                    ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
                goto restart;
        }
        return(ncp->nc_error);
}

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to their mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call is that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct namecache *ncp)
{
        struct vnode *vp;
        struct mount *mp = ncp->nc_mount;
        int error;

        KKASSERT(mp != NULL);
        if (ncp->nc_flag & NCF_UNRESOLVED) {
                cache_unlock(ncp);
                while (vfs_busy(mp, 0, NULL, curthread))
                        ;
                error = VFS_ROOT(mp, &vp);
                cache_lock(ncp);

                /*
                 * recheck the ncp state after relocking.
                 */
                if (ncp->nc_flag & NCF_UNRESOLVED) {
                        ncp->nc_error = error;
                        if (error == 0) {
                                cache_setvp(ncp, vp);
                                vput(vp);
                        } else {
                                printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
                                cache_setvp(ncp, NULL);
                        }
                } else if (error == 0) {
                        vput(vp);
                }
                vfs_unbusy(mp, curthread);
        }
        return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
        struct namecache *ncp;

        /*
         * Automode from the vnlru proc - clean out 10% of the negative cache
         * entries.
         */
        if (count == 0)
                count = numneg / 10 + 1;

        /*
         * Attempt to clean out the specified number of negative cache
         * entries.
         */
        while (count) {
                ncp = TAILQ_FIRST(&ncneglist);
                if (ncp == NULL) {
                        KKASSERT(numneg == 0);
                        break;
                }
                TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                if (cache_get_nonblock(ncp) == 0)
                        cache_zap(ncp);
                --count;
        }
}

/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
cache_rehash(struct namecache *ncp)
{
        struct nchashhead *nchpp;
        u_int32_t hash;

        if (ncp->nc_flag & NCF_HASHED) {
                ncp->nc_flag &= ~NCF_HASHED;
                LIST_REMOVE(ncp, nc_hash);
        }
        if (ncp->nc_nlen && ncp->nc_parent) {
                hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
                hash = fnv_32_buf(&ncp->nc_parent,
                    sizeof(ncp->nc_parent), hash);
                nchpp = NCHHASH(hash);
                LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
                ncp->nc_flag |= NCF_HASHED;
        }
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
        int i;
        globaldata_t gd;

        /* initialise per-cpu namecache effectiveness statistics. */
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                gd->gd_nchstats = &nchstats[i];
        }
        TAILQ_INIT(&ncneglist);
        nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
        nclockwarn = 1 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
struct namecache *
cache_allocroot(struct mount *mp, struct vnode *vp)
{
        struct namecache *ncp = cache_alloc(0);

        ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
        ncp->nc_mount = mp;
        cache_setvp(ncp, vp);
        return(ncp);
}

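/*
 * Illustrative sketch (not compiled, assuming the usual mount-time
 * bookkeeping): the root entry is stored in the mount point so code such
 * as cache_fromdvp() and cache_resolve_mp() can find it later:
 *
 *      mp->mnt_ncp = cache_allocroot(mp, rootvp);
 */
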
/*
 * vfs_cache_setroot()
 *
 *      Create an association between the root of our namecache and
 *      the root vnode.  This routine may be called several times during
 *      booting.
 *
 *      If the caller intends to save the returned namecache pointer somewhere
 *      it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
{
        struct vnode *ovp;
        struct namecache *oncp;

        ovp = rootvnode;
        oncp = rootncp;
        rootvnode = nvp;
        rootncp = ncp;

        if (ovp)
                vrele(ovp);
        if (oncp)
                cache_drop(oncp);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * A new vnode v_id is generated.  Note that no vnode will ever have a
 * v_id of 0.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 *
 * XXX: Only time and the size of v_id prevents this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnodes v_id individually instead of
 * XXX: using the global v_id.
 */
void
cache_purge(struct vnode *vp)
{
        static u_long nextid;

        cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);

        /*
         * Calculate a new unique id for ".." handling
         */
        do {
                nextid++;
        } while (nextid == vp->v_id || nextid == 0);
        vp->v_id = nextid;
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
        struct nchashhead *nchpp;
        struct namecache *ncp, *nnp;

        /*
         * Scan hash tables for applicable entries.
         */
        for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
                ncp = LIST_FIRST(nchpp);
                if (ncp)
                        cache_hold(ncp);
                while (ncp) {
                        nnp = LIST_NEXT(ncp, nc_hash);
                        if (nnp)
                                cache_hold(nnp);
                        if (ncp->nc_mount == mp) {
                                cache_lock(ncp);
                                cache_zap(ncp);
                        } else {
                                cache_drop(ncp);
                        }
                        ncp = nnp;
                }
        }
}

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
__getcwd(struct __getcwd_args *uap)
{
        int buflen;
        int error;
        char *buf;
        char *bp;

        if (disablecwd)
                return (ENODEV);

        buflen = uap->buflen;
        if (buflen < 2)
                return (EINVAL);
        if (buflen > MAXPATHLEN)
                buflen = MAXPATHLEN;

        buf = malloc(buflen, M_TEMP, M_WAITOK);
        bp = kern_getcwd(buf, buflen, &error);
        if (error == 0)
                error = copyout(bp, uap->buf, strlen(bp) + 1);
        free(buf, M_TEMP);
        return (error);
}

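/*
 * Illustrative sketch (not compiled): kern_getcwd() builds the path
 * backwards from the end of the caller-supplied buffer, so the returned
 * pointer lands somewhere inside buf rather than at its start:
 *
 *      buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
 *      bp = kern_getcwd(buf, MAXPATHLEN, &error);
 *      if (error == 0)
 *              printf("cwd = %s\n", bp);
 *      free(buf, M_TEMP);
 */
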
char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
        struct proc *p = curproc;
        char *bp;
        int i, slash_prefixed;
        struct filedesc *fdp;
        struct namecache *ncp;

        numcwdcalls++;
        bp = buf;
        bp += buflen - 1;
        *bp = '\0';
        fdp = p->p_fd;
        slash_prefixed = 0;

        ncp = fdp->fd_ncdir;
        while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
                if (ncp->nc_flag & NCF_MOUNTPT) {
                        if (ncp->nc_mount == NULL) {
                                *error = EBADF;         /* forced unmount? */
                                return(NULL);
                        }
                        ncp = ncp->nc_parent;
                        continue;
                }
                for (i = ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numcwdfail4++;
                                *error = ENOMEM;
                                return(NULL);
                        }
                        *--bp = ncp->nc_name[i];
                }
                if (bp == buf) {
                        numcwdfail4++;
                        *error = ENOMEM;
                        return(NULL);
                }
                *--bp = '/';
                slash_prefixed = 1;
                ncp = ncp->nc_parent;
        }
        if (ncp == NULL) {
                numcwdfail2++;
                *error = ENOENT;
                return(NULL);
        }
        if (!slash_prefixed) {
                if (bp == buf) {
                        numcwdfail4++;
                        *error = ENOMEM;
                        return(NULL);
                }
                *--bp = '/';
        }
        numcwdfound++;
        *error = 0;
        return (bp);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name) \
        static u_int name; \
        SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct namecache *ncp, char **retbuf, char **freebuf)
{
        char *bp, *buf;
        int i, slash_prefixed;
        struct filedesc *fdp;

        numfullpathcalls--;

        buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        bp = buf + MAXPATHLEN - 1;
        *bp = '\0';
        fdp = p->p_fd;
        slash_prefixed = 0;
        while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
                if (ncp->nc_flag & NCF_MOUNTPT) {
                        if (ncp->nc_mount == NULL) {
                                free(buf, M_TEMP);
                                return(EBADF);
                        }
                        ncp = ncp->nc_parent;
                        continue;
                }
                for (i = ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numfullpathfail4++;
                                free(buf, M_TEMP);
                                return(ENOMEM);
                        }
                        *--bp = ncp->nc_name[i];
                }
                if (bp == buf) {
                        numfullpathfail4++;
                        free(buf, M_TEMP);
                        return(ENOMEM);
                }
                *--bp = '/';
                slash_prefixed = 1;
                ncp = ncp->nc_parent;
        }
        if (ncp == NULL) {
                numfullpathfail2++;
                free(buf, M_TEMP);
                return(ENOENT);
        }
        if (!slash_prefixed) {
                if (bp == buf) {
                        numfullpathfail4++;
                        free(buf, M_TEMP);
                        return(ENOMEM);
                }
                *--bp = '/';
        }
        numfullpathfound++;
        *retbuf = bp;
        *freebuf = buf;

        return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
        struct namecache *ncp;

        numfullpathcalls++;
        if (disablefullpath)
                return (ENODEV);

        if (p == NULL)
                return (EINVAL);

        /* vn is NULL, client wants us to use p->p_textvp */
        if (vn == NULL) {
                if ((vn = p->p_textvp) == NULL)
                        return (EINVAL);
        }
        TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
                if (ncp->nc_nlen)
                        break;
        }
        if (ncp == NULL)
                return (EINVAL);

        numfullpathcalls--;
        return(cache_fullpath(p, ncp, retbuf, freebuf));
}
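
/*
 * Illustrative sketch (not compiled): vn_fullpath() hands back two
 * pointers; retbuf points into the middle of freebuf, so callers free
 * freebuf, never retbuf:
 *
 *      if (vn_fullpath(p, vp, &retbuf, &freebuf) == 0) {
 *              printf("path = %s\n", retbuf);
 *              free(freebuf, M_TEMP);
 *      }
 */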