Add an argument to vfs_add_vnodeops() to specify VVF_* flags for the vop_ops
[dragonfly.git] / sys / kern / vfs_cache.c
CommitLineData
984263bc 1/*
8c10bfcf
MD
2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
984263bc
MD
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
36 *
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
69 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
dc1be39c 70 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.58 2005/09/17 07:42:59 dillon Exp $
984263bc
MD
71 */
72
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/kernel.h>
76#include <sys/sysctl.h>
77#include <sys/mount.h>
78#include <sys/vnode.h>
984263bc
MD
79#include <sys/malloc.h>
80#include <sys/sysproto.h>
81#include <sys/proc.h>
dadab5e9 82#include <sys/namei.h>
690a3127 83#include <sys/nlookup.h>
984263bc
MD
84#include <sys/filedesc.h>
85#include <sys/fnv_hash.h>
24e51f36 86#include <sys/globaldata.h>
63f58b90 87#include <sys/kern_syscall.h>
fad57d0e 88#include <sys/dirent.h>
8c361dda 89#include <ddb/ddb.h>
984263bc
MD
90
91/*
7ea21ed1 92 * Random lookups in the cache are accomplished with a hash table using
8987aad7 93 * a hash key of (nc_src_vp, name).
984263bc 94 *
7ea21ed1 95 * Negative entries may exist and correspond to structures where nc_vp
8987aad7
MD
96 * is NULL. In a negative entry, NCF_WHITEOUT will be set if the entry
97 * corresponds to a whited-out directory entry (verses simply not finding the
98 * entry at all).
984263bc 99 *
8987aad7
MD
100 * Upon reaching the last segment of a path, if the reference is for DELETE,
101 * or NOCACHE is set (rewrite), and the name is located in the cache, it
102 * will be dropped.
984263bc
MD
103 */
104
105/*
106 * Structures associated with name cacheing.
107 */
8987aad7 108#define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
f517a1bb 109#define MINNEG 1024
8987aad7 110
24e51f36
HP
111MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
112
984263bc 113static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */
7ea21ed1 114static struct namecache_list ncneglist; /* instead of vnode */
8987aad7 115
fad57d0e
MD
116/*
117 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server
118 * to create the namecache infrastructure leading to a dangling vnode.
119 *
120 * 0 Only errors are reported
121 * 1 Successes are reported
122 * 2 Successes + the whole directory scan is reported
123 * 3 Force the directory scan code run as if the parent vnode did not
124 * have a namecache record, even if it does have one.
125 */
126static int ncvp_debug;
127SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");
128
984263bc
MD
129static u_long nchash; /* size of hash table */
130SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
8987aad7 131
984263bc
MD
132static u_long ncnegfactor = 16; /* ratio of negative entries */
133SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
8987aad7 134
fc21741a
MD
135static int nclockwarn; /* warn on locked entries in ticks */
136SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");
137
984263bc
MD
138static u_long numneg; /* number of cache entries allocated */
139SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
8987aad7 140
984263bc
MD
141static u_long numcache; /* number of cache entries allocated */
142SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
8987aad7 143
f517a1bb
MD
144static u_long numunres; /* number of unresolved entries */
145SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");
146
984263bc
MD
147SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
148SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
149
646a1cda 150static int cache_resolve_mp(struct namecache *ncp);
8e005a45 151static void cache_rehash(struct namecache *ncp);
646a1cda 152
984263bc
MD
153/*
154 * The new name cache statistics
155 */
156SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
157#define STATNODE(mode, name, var) \
158 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
159STATNODE(CTLFLAG_RD, numneg, &numneg);
160STATNODE(CTLFLAG_RD, numcache, &numcache);
161static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
162static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
163static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
164static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
165static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
166static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
167static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
168static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
169static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
170static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
171
24e51f36
HP
172struct nchstats nchstats[SMP_MAXCPU];
173/*
174 * Export VFS cache effectiveness statistics to user-land.
175 *
176 * The statistics are left for aggregation to user-land so
177 * neat things can be achieved, like observing per-CPU cache
178 * distribution.
179 */
180static int
3736bb9b 181sysctl_nchstats(SYSCTL_HANDLER_ARGS)
24e51f36
HP
182{
183 struct globaldata *gd;
184 int i, error;
185
186 error = 0;
187 for (i = 0; i < ncpus; ++i) {
188 gd = globaldata_find(i);
189 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
190 sizeof(struct nchstats))))
191 break;
192 }
984263bc 193
24e51f36
HP
194 return (error);
195}
196SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
3736bb9b 197 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
984263bc 198
24e51f36 199static void cache_zap(struct namecache *ncp);
984263bc
MD
200
201/*
7ea21ed1
MD
202 * cache_hold() and cache_drop() prevent the premature deletion of a
203 * namecache entry but do not prevent operations (such as zapping) on
204 * that namecache entry.
984263bc 205 */
7ea21ed1
MD
206static __inline
207struct namecache *
bc0c094e 208_cache_hold(struct namecache *ncp)
7ea21ed1
MD
209{
210 ++ncp->nc_refs;
211 return(ncp);
212}
213
8c361dda 214/*
67773eb3
MD
215 * When dropping an entry, if only one ref remains and the entry has not
216 * been resolved, zap it. Since the one reference is being dropped the
217 * entry had better not be locked.
8c361dda 218 */
7ea21ed1
MD
219static __inline
220void
bc0c094e 221_cache_drop(struct namecache *ncp)
7ea21ed1
MD
222{
223 KKASSERT(ncp->nc_refs > 0);
f517a1bb
MD
224 if (ncp->nc_refs == 1 &&
225 (ncp->nc_flag & NCF_UNRESOLVED) &&
226 TAILQ_EMPTY(&ncp->nc_list)
227 ) {
67773eb3
MD
228 KKASSERT(ncp->nc_exlocks == 0);
229 cache_lock(ncp);
7ea21ed1 230 cache_zap(ncp);
f517a1bb 231 } else {
7ea21ed1 232 --ncp->nc_refs;
f517a1bb 233 }
7ea21ed1 234}
8987aad7 235
690a3127
MD
236/*
237 * Link a new namecache entry to its parent. Be careful to avoid races
238 * if vhold() blocks in the future.
8c361dda
MD
239 *
240 * If we are creating a child under an oldapi parent we must mark the
241 * child as being an oldapi entry as well.
690a3127
MD
242 */
243static void
244cache_link_parent(struct namecache *ncp, struct namecache *par)
245{
246 KKASSERT(ncp->nc_parent == NULL);
247 ncp->nc_parent = par;
248 if (TAILQ_EMPTY(&par->nc_list)) {
249 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
21739618
MD
250 /*
251 * Any vp associated with an ncp which has children must
55361147 252 * be held to prevent it from being recycled.
21739618 253 */
690a3127
MD
254 if (par->nc_vp)
255 vhold(par->nc_vp);
256 } else {
257 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
258 }
259}
260
261/*
b8997912
MD
262 * Remove the parent association from a namecache structure. If this is
263 * the last child of the parent the cache_drop(par) will attempt to
264 * recursively zap the parent.
690a3127
MD
265 */
266static void
267cache_unlink_parent(struct namecache *ncp)
268{
269 struct namecache *par;
270
271 if ((par = ncp->nc_parent) != NULL) {
272 ncp->nc_parent = NULL;
273 par = cache_hold(par);
274 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
275 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
276 vdrop(par->nc_vp);
277 cache_drop(par);
278 }
279}
280
281/*
fad57d0e
MD
282 * Allocate a new namecache structure. Most of the code does not require
283 * zero-termination of the string but it makes vop_compat_ncreate() easier.
690a3127
MD
284 */
285static struct namecache *
524c845c 286cache_alloc(int nlen)
690a3127
MD
287{
288 struct namecache *ncp;
289
290 ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
524c845c 291 if (nlen)
fad57d0e 292 ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
524c845c 293 ncp->nc_nlen = nlen;
690a3127
MD
294 ncp->nc_flag = NCF_UNRESOLVED;
295 ncp->nc_error = ENOTCONN; /* needs to be resolved */
8c361dda 296 ncp->nc_refs = 1;
dc1be39c 297 ncp->nc_fsmid = 1;
690a3127 298 TAILQ_INIT(&ncp->nc_list);
8c361dda 299 cache_lock(ncp);
690a3127
MD
300 return(ncp);
301}
302
8c361dda
MD
303static void
304cache_free(struct namecache *ncp)
305{
306 KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
307 if (ncp->nc_name)
308 free(ncp->nc_name, M_VFSCACHE);
309 free(ncp, M_VFSCACHE);
310}
690a3127
MD
311
312/*
313 * Ref and deref a namecache structure.
314 */
bc0c094e
MD
315struct namecache *
316cache_hold(struct namecache *ncp)
317{
318 return(_cache_hold(ncp));
319}
320
321void
322cache_drop(struct namecache *ncp)
323{
324 _cache_drop(ncp);
325}
326
14c92d03
MD
327/*
328 * Namespace locking. The caller must already hold a reference to the
21739618
MD
329 * namecache structure in order to lock/unlock it. This function prevents
330 * the namespace from being created or destroyed by accessors other then
331 * the lock holder.
14c92d03 332 *
55361147
MD
333 * Note that holding a locked namecache structure prevents other threads
334 * from making namespace changes (e.g. deleting or creating), prevents
335 * vnode association state changes by other threads, and prevents the
336 * namecache entry from being resolved or unresolved by other threads.
337 *
338 * The lock owner has full authority to associate/disassociate vnodes
339 * and resolve/unresolve the locked ncp.
340 *
341 * In particular, if a vnode is associated with a locked cache entry
342 * that vnode will *NOT* be recycled. We accomplish this by vhold()ing the
343 * vnode. XXX we should find a more efficient way to prevent the vnode
344 * from being recycled, but remember that any given vnode may have multiple
345 * namecache associations (think hardlinks).
14c92d03
MD
346 */
347void
348cache_lock(struct namecache *ncp)
349{
55361147
MD
350 thread_t td;
351 int didwarn;
14c92d03
MD
352
353 KKASSERT(ncp->nc_refs != 0);
55361147
MD
354 didwarn = 0;
355 td = curthread;
356
14c92d03
MD
357 for (;;) {
358 if (ncp->nc_exlocks == 0) {
359 ncp->nc_exlocks = 1;
360 ncp->nc_locktd = td;
55361147
MD
361 /*
362 * The vp associated with a locked ncp must be held
363 * to prevent it from being recycled (which would
364 * cause the ncp to become unresolved).
365 *
366 * XXX loop on race for later MPSAFE work.
367 */
368 if (ncp->nc_vp)
369 vhold(ncp->nc_vp);
14c92d03
MD
370 break;
371 }
372 if (ncp->nc_locktd == td) {
373 ++ncp->nc_exlocks;
374 break;
375 }
376 ncp->nc_flag |= NCF_LOCKREQ;
fc21741a 377 if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
5fd012e0
MD
378 if (didwarn)
379 continue;
380 didwarn = 1;
381 printf("[diagnostic] cache_lock: blocked on %p", ncp);
382 if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
75ffff0d 383 printf(" [MOUNTFROM %s]\n", ncp->nc_mount->mnt_stat.f_mntfromname);
5fd012e0
MD
384 else
385 printf(" \"%*.*s\"\n",
386 ncp->nc_nlen, ncp->nc_nlen,
387 ncp->nc_name);
14c92d03
MD
388 }
389 }
55361147 390
14c92d03 391 if (didwarn == 1) {
21739618 392 printf("[diagnostic] cache_lock: unblocked %*.*s\n",
14c92d03
MD
393 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
394 }
395}
396
fad57d0e
MD
397int
398cache_lock_nonblock(struct namecache *ncp)
399{
400 thread_t td;
401
402 KKASSERT(ncp->nc_refs != 0);
403 td = curthread;
404 if (ncp->nc_exlocks == 0) {
405 ncp->nc_exlocks = 1;
406 ncp->nc_locktd = td;
407 /*
408 * The vp associated with a locked ncp must be held
409 * to prevent it from being recycled (which would
410 * cause the ncp to become unresolved).
411 *
412 * XXX loop on race for later MPSAFE work.
413 */
414 if (ncp->nc_vp)
415 vhold(ncp->nc_vp);
416 return(0);
417 } else {
418 return(EWOULDBLOCK);
419 }
420}
421
14c92d03
MD
422void
423cache_unlock(struct namecache *ncp)
424{
425 thread_t td = curthread;
426
427 KKASSERT(ncp->nc_refs > 0);
428 KKASSERT(ncp->nc_exlocks > 0);
429 KKASSERT(ncp->nc_locktd == td);
430 if (--ncp->nc_exlocks == 0) {
55361147
MD
431 if (ncp->nc_vp)
432 vdrop(ncp->nc_vp);
14c92d03
MD
433 ncp->nc_locktd = NULL;
434 if (ncp->nc_flag & NCF_LOCKREQ) {
435 ncp->nc_flag &= ~NCF_LOCKREQ;
fc21741a 436 wakeup(ncp);
14c92d03
MD
437 }
438 }
439}
440
441/*
690a3127 442 * ref-and-lock, unlock-and-deref functions.
14c92d03 443 */
21739618 444struct namecache *
690a3127
MD
445cache_get(struct namecache *ncp)
446{
447 _cache_hold(ncp);
448 cache_lock(ncp);
21739618 449 return(ncp);
690a3127
MD
450}
451
8e005a45
MD
452int
453cache_get_nonblock(struct namecache *ncp)
454{
455 /* XXX MP */
456 if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
457 _cache_hold(ncp);
458 cache_lock(ncp);
459 return(0);
460 }
461 return(EWOULDBLOCK);
462}
463
14c92d03
MD
464void
465cache_put(struct namecache *ncp)
466{
467 cache_unlock(ncp);
468 _cache_drop(ncp);
469}
470
690a3127
MD
471/*
472 * Resolve an unresolved ncp by associating a vnode with it. If the
473 * vnode is NULL, a negative cache entry is created.
474 *
475 * The ncp should be locked on entry and will remain locked on return.
476 */
477void
478cache_setvp(struct namecache *ncp, struct vnode *vp)
ce6da7e4 479{
690a3127 480 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
ce6da7e4
MD
481 ncp->nc_vp = vp;
482 if (vp != NULL) {
21739618
MD
483 /*
484 * Any vp associated with an ncp which has children must
55361147 485 * be held. Any vp associated with a locked ncp must be held.
21739618
MD
486 */
487 if (!TAILQ_EMPTY(&ncp->nc_list))
488 vhold(vp);
ce6da7e4 489 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
55361147
MD
490 if (ncp->nc_exlocks)
491 vhold(vp);
21739618
MD
492
493 /*
494 * Set auxillary flags
495 */
690a3127
MD
496 switch(vp->v_type) {
497 case VDIR:
21739618
MD
498 ncp->nc_flag |= NCF_ISDIR;
499 break;
690a3127 500 case VLNK:
21739618
MD
501 ncp->nc_flag |= NCF_ISSYMLINK;
502 /* XXX cache the contents of the symlink */
503 break;
690a3127 504 default:
21739618 505 break;
690a3127 506 }
ce6da7e4 507 ++numcache;
21739618 508 ncp->nc_error = 0;
ce6da7e4 509 } else {
1345c2b6 510 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
ce6da7e4 511 ++numneg;
21739618 512 ncp->nc_error = ENOENT;
ce6da7e4 513 }
690a3127 514 ncp->nc_flag &= ~NCF_UNRESOLVED;
ce6da7e4
MD
515}
516
fad57d0e
MD
517void
518cache_settimeout(struct namecache *ncp, int nticks)
519{
520 if ((ncp->nc_timeout = ticks + nticks) == 0)
521 ncp->nc_timeout = 1;
522}
523
690a3127
MD
524/*
525 * Disassociate the vnode or negative-cache association and mark a
526 * namecache entry as unresolved again. Note that the ncp is still
527 * left in the hash table and still linked to its parent.
528 *
67773eb3
MD
529 * The ncp should be locked and refd on entry and will remain locked and refd
530 * on return.
8c361dda
MD
531 *
532 * This routine is normally never called on a directory containing children.
533 * However, NFS often does just that in its rename() code as a cop-out to
534 * avoid complex namespace operations. This disconnects a directory vnode
535 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
536 * sync.
690a3127
MD
537 */
538void
539cache_setunresolved(struct namecache *ncp)
14c92d03 540{
690a3127 541 struct vnode *vp;
14c92d03 542
690a3127
MD
543 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
544 ncp->nc_flag |= NCF_UNRESOLVED;
545 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
fad57d0e 546 ncp->nc_timeout = 0;
690a3127
MD
547 ncp->nc_error = ENOTCONN;
548 ++numunres;
549 if ((vp = ncp->nc_vp) != NULL) {
550 --numcache;
fad57d0e 551 ncp->nc_vp = NULL;
690a3127 552 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
55361147
MD
553
554 /*
555 * Any vp associated with an ncp with children is
556 * held by that ncp. Any vp associated with a locked
557 * ncp is held by that ncp. These conditions must be
558 * undone when the vp is cleared out from the ncp.
559 */
690a3127
MD
560 if (!TAILQ_EMPTY(&ncp->nc_list))
561 vdrop(vp);
55361147
MD
562 if (ncp->nc_exlocks)
563 vdrop(vp);
690a3127
MD
564 } else {
565 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
566 --numneg;
567 }
8e005a45
MD
568 }
569}
8c361dda 570
8e005a45 571/*
e09206ba
MD
572 * Invalidate portions of the namecache topology given a starting entry.
573 * The passed ncp is set to an unresolved state and:
8e005a45 574 *
e09206ba
MD
575 * The passed ncp must be locked.
576 *
577 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
578 * that the physical underlying nodes have been
579 * destroyed... as in deleted. For example, when
580 * a directory is removed. This will cause record
581 * lookups on the name to no longer be able to find
582 * the record and tells the resolver to return failure
583 * rather then trying to resolve through the parent.
584 *
585 * The topology itself, including ncp->nc_name,
586 * remains intact.
587 *
588 * This only applies to the passed ncp, if CINV_CHILDREN
589 * is specified the children are not flagged.
590 *
591 * CINV_CHILDREN - Set all children (recursively) to an unresolved
592 * state as well.
593 *
594 * Note that this will also have the side effect of
595 * cleaning out any unreferenced nodes in the topology
596 * from the leaves up as the recursion backs out.
597 *
598 * Note that the topology for any referenced nodes remains intact.
25cb3304
MD
599 *
600 * It is possible for cache_inval() to race a cache_resolve(), meaning that
601 * the namecache entry may not actually be invalidated on return if it was
602 * revalidated while recursing down into its children. This code guarentees
603 * that the node(s) will go through an invalidation cycle, but does not
604 * guarentee that they will remain in an invalidated state.
605 *
606 * Returns non-zero if a revalidation was detected during the invalidation
607 * recursion, zero otherwise. Note that since only the original ncp is
608 * locked the revalidation ultimately can only indicate that the original ncp
609 * *MIGHT* no have been reresolved.
8e005a45 610 */
25cb3304 611int
8e005a45
MD
612cache_inval(struct namecache *ncp, int flags)
613{
614 struct namecache *kid;
b8997912 615 struct namecache *nextkid;
25cb3304 616 int rcnt = 0;
8e005a45 617
e09206ba 618 KKASSERT(ncp->nc_exlocks);
25cb3304 619
e09206ba
MD
620 cache_setunresolved(ncp);
621 if (flags & CINV_DESTROY)
622 ncp->nc_flag |= NCF_DESTROYED;
b8997912 623
e09206ba
MD
624 if ((flags & CINV_CHILDREN) &&
625 (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
626 ) {
627 cache_hold(kid);
628 cache_unlock(ncp);
b8997912
MD
629 while (kid) {
630 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
631 cache_hold(nextkid);
e09206ba
MD
632 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
633 TAILQ_FIRST(&kid->nc_list)
b8997912 634 ) {
e09206ba 635 cache_lock(kid);
25cb3304 636 rcnt += cache_inval(kid, flags & ~CINV_DESTROY);
e09206ba 637 cache_unlock(kid);
b8997912 638 }
b8997912 639 cache_drop(kid);
fad57d0e 640 kid = nextkid;
8e005a45 641 }
e09206ba 642 cache_lock(ncp);
8e005a45 643 }
25cb3304
MD
644
645 /*
646 * Someone could have gotten in there while ncp was unlocked,
647 * retry if so.
648 */
649 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
650 ++rcnt;
651 return (rcnt);
8e005a45
MD
652}
653
e09206ba 654/*
25cb3304
MD
655 * Invalidate a vnode's namecache associations. To avoid races against
656 * the resolver we do not invalidate a node which we previously invalidated
657 * but which was then re-resolved while we were in the invalidation loop.
658 *
659 * Returns non-zero if any namecache entries remain after the invalidation
660 * loop completed.
2aefb2c5
MD
661 *
662 * NOTE: unlike the namecache topology which guarentees that ncp's will not
663 * be ripped out of the topology while held, the vnode's v_namecache list
664 * has no such restriction. NCP's can be ripped out of the list at virtually
665 * any time if not locked, even if held.
e09206ba 666 */
25cb3304 667int
dc1be39c 668cache_inval_vp(struct vnode *vp, int flags, int *retflags)
8e005a45
MD
669{
670 struct namecache *ncp;
25cb3304
MD
671 struct namecache *next;
672
2aefb2c5 673restart:
25cb3304
MD
674 ncp = TAILQ_FIRST(&vp->v_namecache);
675 if (ncp)
676 cache_hold(ncp);
677 while (ncp) {
678 /* loop entered with ncp held */
2aefb2c5 679 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
25cb3304
MD
680 cache_hold(next);
681 cache_lock(ncp);
2aefb2c5
MD
682 if (ncp->nc_vp != vp) {
683 printf("Warning: cache_inval_vp: race-A detected on "
684 "%s\n", ncp->nc_name);
685 cache_put(ncp);
69313361
MD
686 if (next)
687 cache_drop(next);
2aefb2c5
MD
688 goto restart;
689 }
dc1be39c 690 *retflags |= ncp->nc_flag & NCF_FSMID;
e09206ba 691 cache_inval(ncp, flags);
25cb3304
MD
692 cache_put(ncp); /* also releases reference */
693 ncp = next;
2aefb2c5
MD
694 if (ncp && ncp->nc_vp != vp) {
695 printf("Warning: cache_inval_vp: race-B detected on "
696 "%s\n", ncp->nc_name);
697 cache_drop(ncp);
698 goto restart;
699 }
690a3127 700 }
25cb3304 701 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
14c92d03 702}
14c92d03 703
fad57d0e
MD
704/*
705 * The source ncp has been renamed to the target ncp. Both fncp and tncp
706 * must be locked. Both will be set to unresolved, any children of tncp
707 * will be disconnected (the prior contents of the target is assumed to be
708 * destroyed by the rename operation, e.g. renaming over an empty directory),
709 * and all children of fncp will be moved to tncp.
710 *
e09206ba
MD
711 * XXX the disconnection could pose a problem, check code paths to make
712 * sure any code that blocks can handle the parent being changed out from
713 * under it. Maybe we should lock the children (watch out for deadlocks) ?
714 *
fad57d0e
MD
715 * After we return the caller has the option of calling cache_setvp() if
716 * the vnode of the new target ncp is known.
717 *
718 * Any process CD'd into any of the children will no longer be able to ".."
719 * back out. An rm -rf can cause this situation to occur.
720 */
721void
722cache_rename(struct namecache *fncp, struct namecache *tncp)
723{
724 struct namecache *scan;
25cb3304 725 int didwarn = 0;
fad57d0e
MD
726
727 cache_setunresolved(fncp);
728 cache_setunresolved(tncp);
25cb3304
MD
729 while (cache_inval(tncp, CINV_CHILDREN) != 0) {
730 if (didwarn++ % 10 == 0) {
731 printf("Warning: cache_rename: race during "
732 "rename %s->%s\n",
733 fncp->nc_name, tncp->nc_name);
734 }
735 tsleep(tncp, 0, "mvrace", hz / 10);
736 cache_setunresolved(tncp);
737 }
fad57d0e
MD
738 while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
739 cache_hold(scan);
740 cache_unlink_parent(scan);
741 cache_link_parent(scan, tncp);
742 if (scan->nc_flag & NCF_HASHED)
743 cache_rehash(scan);
744 cache_drop(scan);
745 }
746}
747
21739618
MD
748/*
749 * vget the vnode associated with the namecache entry. Resolve the namecache
750 * entry if necessary and deal with namecache/vp races. The passed ncp must
751 * be referenced and may be locked. The ncp's ref/locking state is not
752 * effected by this call.
753 *
754 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
755 * (depending on the passed lk_type) will be returned in *vpp with an error
756 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
757 * most typical error is ENOENT, meaning that the ncp represents a negative
758 * cache hit and there is no vnode to retrieve, but other errors can occur
759 * too.
760 *
761 * The main race we have to deal with are namecache zaps. The ncp itself
762 * will not disappear since it is referenced, and it turns out that the
763 * validity of the vp pointer can be checked simply by rechecking the
764 * contents of ncp->nc_vp.
765 */
766int
767cache_vget(struct namecache *ncp, struct ucred *cred,
768 int lk_type, struct vnode **vpp)
769{
770 struct vnode *vp;
771 int error;
772
773again:
774 vp = NULL;
775 if (ncp->nc_flag & NCF_UNRESOLVED) {
776 cache_lock(ncp);
777 error = cache_resolve(ncp, cred);
778 cache_unlock(ncp);
779 } else {
780 error = 0;
781 }
782 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
5fd012e0 783 error = vget(vp, lk_type, curthread);
21739618
MD
784 if (error) {
785 if (vp != ncp->nc_vp) /* handle cache_zap race */
786 goto again;
787 vp = NULL;
788 } else if (vp != ncp->nc_vp) { /* handle cache_zap race */
789 vput(vp);
790 goto again;
791 }
792 }
793 if (error == 0 && vp == NULL)
794 error = ENOENT;
795 *vpp = vp;
796 return(error);
797}
798
799int
800cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
801{
802 struct vnode *vp;
803 int error;
804
805again:
806 vp = NULL;
807 if (ncp->nc_flag & NCF_UNRESOLVED) {
808 cache_lock(ncp);
809 error = cache_resolve(ncp, cred);
810 cache_unlock(ncp);
811 } else {
812 error = 0;
813 }
814 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
815 vref(vp);
816 if (vp != ncp->nc_vp) { /* handle cache_zap race */
817 vrele(vp);
818 goto again;
819 }
820 }
821 if (error == 0 && vp == NULL)
822 error = ENOENT;
823 *vpp = vp;
824 return(error);
825}
826
dc1be39c
MD
827/*
828 * Recursively set the FSMID update flag for namecache nodes leading
829 * to root. This will cause the next getattr to increment the fsmid.
830 */
7d15906a
MD
831void
832cache_update_fsmid(struct namecache *ncp)
833{
834 struct vnode *vp;
835 struct namecache *scan;
7d15906a
MD
836
837 if ((vp = ncp->nc_vp) != NULL) {
838 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
839 for (scan = ncp; scan; scan = scan->nc_parent)
dc1be39c 840 scan->nc_flag |= NCF_FSMID;
7d15906a
MD
841 }
842 } else {
843 while (ncp) {
dc1be39c 844 ncp->nc_flag |= NCF_FSMID;
7d15906a
MD
845 ncp = ncp->nc_parent;
846 }
847 }
848}
849
850void
851cache_update_fsmid_vp(struct vnode *vp)
852{
853 struct namecache *ncp;
854 struct namecache *scan;
7d15906a
MD
855
856 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
857 for (scan = ncp; scan; scan = scan->nc_parent)
dc1be39c 858 scan->nc_flag |= NCF_FSMID;
7d15906a
MD
859 }
860}
861
dc1be39c
MD
862/*
863 * If getattr is called on a vnode (e.g. a stat call), the filesystem
864 * may call this routine to determine if the namecache has the hierarchical
865 * change flag set, requiring the fsmid to be updated.
866 *
867 * Since 0 indicates no support, make sure the filesystem fsmid is at least
868 * 1.
869 */
870int
871cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
872{
873 struct namecache *ncp;
874 int changed = 0;
875
876 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
877 if (ncp->nc_flag & NCF_FSMID) {
878 ncp->nc_flag &= ~NCF_FSMID;
879 changed = 1;
880 }
881 }
882 if (*fsmid == 0)
883 ++*fsmid;
884 if (changed)
885 ++*fsmid;
886 return(changed);
887}
888
fad57d0e
MD
889/*
890 * Convert a directory vnode to a namecache record without any other
891 * knowledge of the topology. This ONLY works with directory vnodes and
892 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
893 * returned ncp (if not NULL) will be held and unlocked.
894 *
895 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
896 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
897 * for dvp. This will fail only if the directory has been deleted out from
898 * under the caller.
899 *
900 * Callers must always check for a NULL return no matter the value of 'makeit'.
901 */
902
903static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
904 struct vnode *dvp);
905
906struct namecache *
907cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
908{
909 struct namecache *ncp;
910 struct vnode *pvp;
911 int error;
912
913 /*
914 * Temporary debugging code to force the directory scanning code
915 * to be exercised.
916 */
917 ncp = NULL;
918 if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
919 ncp = TAILQ_FIRST(&dvp->v_namecache);
920 printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
921 goto force;
922 }
923
924 /*
925 * Loop until resolution, inside code will break out on error.
926 */
927 while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
928force:
929 /*
930 * If dvp is the root of its filesystem it should already
931 * have a namecache pointer associated with it as a side
932 * effect of the mount, but it may have been disassociated.
933 */
934 if (dvp->v_flag & VROOT) {
935 ncp = cache_get(dvp->v_mount->mnt_ncp);
936 error = cache_resolve_mp(ncp);
937 cache_put(ncp);
938 if (ncvp_debug) {
939 printf("cache_fromdvp: resolve root of mount %p error %d",
940 dvp->v_mount, error);
941 }
942 if (error) {
943 if (ncvp_debug)
944 printf(" failed\n");
945 ncp = NULL;
946 break;
947 }
948 if (ncvp_debug)
949 printf(" succeeded\n");
950 continue;
951 }
952
953 /*
954 * Get the parent directory and resolve its ncp.
955 */
6ddb7618 956 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
fad57d0e
MD
957 if (error) {
958 printf("lookupdotdot failed %d %p\n", error, pvp);
959 break;
960 }
961 VOP_UNLOCK(pvp, 0, curthread);
962
963 /*
964 * XXX this recursion could run the kernel out of stack,
965 * change to a less efficient algorithm if we get too deep
966 * (use 'makeit' for a depth counter?)
967 */
968 ncp = cache_fromdvp(pvp, cred, makeit);
969 vrele(pvp);
970 if (ncp == NULL)
971 break;
972
973 /*
974 * Do an inefficient scan of pvp (embodied by ncp) to look
975 * for dvp. This will create a namecache record for dvp on
976 * success. We loop up to recheck on success.
977 *
978 * ncp and dvp are both held but not locked.
979 */
980 error = cache_inefficient_scan(ncp, cred, dvp);
981 cache_drop(ncp);
982 if (error) {
983 printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
984 pvp, ncp->nc_name, dvp);
985 ncp = NULL;
986 break;
987 }
988 if (ncvp_debug) {
989 printf("cache_fromdvp: scan %p (%s) succeeded\n",
990 pvp, ncp->nc_name);
991 }
992 }
993 if (ncp)
994 cache_hold(ncp);
995 return (ncp);
996}
997
998/*
999 * Do an inefficient scan of the directory represented by ncp looking for
1000 * the directory vnode dvp. ncp must be held but not locked on entry and
1001 * will be held on return. dvp must be refd but not locked on entry and
1002 * will remain refd on return.
1003 *
1004 * Why do this at all? Well, due to its stateless nature the NFS server
1005 * converts file handles directly to vnodes without necessarily going through
1006 * the namecache ops that would otherwise create the namecache topology
1007 * leading to the vnode. We could either (1) Change the namecache algorithms
1008 * to allow disconnect namecache records that are re-merged opportunistically,
1009 * or (2) Make the NFS server backtrack and scan to recover a connected
1010 * namecache topology in order to then be able to issue new API lookups.
1011 *
1012 * It turns out that (1) is a huge mess. It takes a nice clean set of
1013 * namecache algorithms and introduces a lot of complication in every subsystem
1014 * that calls into the namecache to deal with the re-merge case, especially
1015 * since we are using the namecache to placehold negative lookups and the
1016 * vnode might not be immediately assigned. (2) is certainly far less
1017 * efficient then (1), but since we are only talking about directories here
1018 * (which are likely to remain cached), the case does not actually run all
1019 * that often and has the supreme advantage of not polluting the namecache
1020 * algorithms.
1021 */
1022static int
1023cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
1024 struct vnode *dvp)
1025{
1026 struct nlcomponent nlc;
1027 struct namecache *rncp;
1028 struct dirent *den;
1029 struct vnode *pvp;
1030 struct vattr vat;
1031 struct iovec iov;
1032 struct uio uio;
fad57d0e
MD
1033 int blksize;
1034 int eofflag;
4d22f42a 1035 int bytes;
fad57d0e
MD
1036 char *rbuf;
1037 int error;
fad57d0e
MD
1038
1039 vat.va_blocksize = 0;
1040 if ((error = VOP_GETATTR(dvp, &vat, curthread)) != 0)
1041 return (error);
1042 if ((error = cache_vget(ncp, cred, LK_SHARED, &pvp)) != 0)
1043 return (error);
1044 if (ncvp_debug)
1045 printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
1046 if ((blksize = vat.va_blocksize) == 0)
1047 blksize = DEV_BSIZE;
1048 rbuf = malloc(blksize, M_TEMP, M_WAITOK);
1049 rncp = NULL;
1050
1051 eofflag = 0;
1052 uio.uio_offset = 0;
fad57d0e 1053again:
fad57d0e
MD
1054 iov.iov_base = rbuf;
1055 iov.iov_len = blksize;
1056 uio.uio_iov = &iov;
1057 uio.uio_iovcnt = 1;
1058 uio.uio_resid = blksize;
1059 uio.uio_segflg = UIO_SYSSPACE;
1060 uio.uio_rw = UIO_READ;
1061 uio.uio_td = curthread;
1062
fad57d0e 1063 if (ncvp_debug >= 2)
4d22f42a
MD
1064 printf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
1065 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
fad57d0e 1066 if (error == 0) {
4d22f42a
MD
1067 den = (struct dirent *)rbuf;
1068 bytes = blksize - uio.uio_resid;
1069
1070 while (bytes > 0) {
1071 if (ncvp_debug >= 2) {
fad57d0e 1072 printf("cache_inefficient_scan: %*.*s\n",
4d22f42a
MD
1073 den->d_namlen, den->d_namlen,
1074 den->d_name);
1075 }
fad57d0e 1076 if (den->d_type != DT_WHT &&
01f31ab3 1077 den->d_ino == vat.va_fileid) {
4d22f42a
MD
1078 if (ncvp_debug) {
1079 printf("cache_inefficient_scan: "
1080 "MATCHED inode %ld path %s/%*.*s\n",
1081 vat.va_fileid, ncp->nc_name,
1082 den->d_namlen, den->d_namlen,
1083 den->d_name);
1084 }
fad57d0e
MD
1085 nlc.nlc_nameptr = den->d_name;
1086 nlc.nlc_namelen = den->d_namlen;
1087 VOP_UNLOCK(pvp, 0, curthread);
1088 rncp = cache_nlookup(ncp, &nlc);
1089 KKASSERT(rncp != NULL);
1090 break;
1091 }
01f31ab3
JS
1092 bytes -= _DIRENT_DIRSIZ(den);
1093 den = _DIRENT_NEXT(den);
fad57d0e
MD
1094 }
1095 if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
1096 goto again;
1097 }
fad57d0e
MD
1098 if (rncp) {
1099 vrele(pvp);
1100 if (rncp->nc_flag & NCF_UNRESOLVED) {
1101 cache_setvp(rncp, dvp);
1102 if (ncvp_debug >= 2) {
1103 printf("cache_inefficient_scan: setvp %s/%s = %p\n",
1104 ncp->nc_name, rncp->nc_name, dvp);
1105 }
1106 } else {
1107 if (ncvp_debug >= 2) {
1108 printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
1109 ncp->nc_name, rncp->nc_name, dvp,
1110 rncp->nc_vp);
1111 }
1112 }
1113 if (rncp->nc_vp == NULL)
1114 error = rncp->nc_error;
1115 cache_put(rncp);
1116 } else {
1117 printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
1118 dvp, ncp->nc_name);
1119 vput(pvp);
1120 error = ENOENT;
1121 }
1122 free(rbuf, M_TEMP);
1123 return (error);
1124}
1125
984263bc 1126/*
67773eb3
MD
1127 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
1128 * state, which disassociates it from its vnode or ncneglist.
7ea21ed1 1129 *
67773eb3
MD
1130 * Then, if there are no additional references to the ncp and no children,
1131 * the ncp is removed from the topology and destroyed. This function will
1132 * also run through the nc_parent chain and destroy parent ncps if possible.
1133 * As a side benefit, it turns out the only conditions that allow running
1134 * up the chain are also the conditions to ensure no deadlock will occur.
7ea21ed1 1135 *
67773eb3
MD
1136 * References and/or children may exist if the ncp is in the middle of the
1137 * topology, preventing the ncp from being destroyed.
7ea21ed1 1138 *
67773eb3
MD
1139 * This function must be called with the ncp held and locked and will unlock
1140 * and drop it during zapping.
984263bc
MD
1141 */
1142static void
8987aad7 1143cache_zap(struct namecache *ncp)
984263bc 1144{
7ea21ed1 1145 struct namecache *par;
7ea21ed1
MD
1146
1147 /*
1148 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
1149 */
690a3127 1150 cache_setunresolved(ncp);
7ea21ed1
MD
1151
1152 /*
1153 * Try to scrap the entry and possibly tail-recurse on its parent.
1154 * We only scrap unref'd (other then our ref) unresolved entries,
1155 * we do not scrap 'live' entries.
1156 */
1157 while (ncp->nc_flag & NCF_UNRESOLVED) {
1158 /*
1159 * Someone other then us has a ref, stop.
1160 */
1161 if (ncp->nc_refs > 1)
1162 goto done;
1163
1164 /*
1165 * We have children, stop.
1166 */
1167 if (!TAILQ_EMPTY(&ncp->nc_list))
1168 goto done;
1169
67773eb3
MD
1170 /*
1171 * Remove ncp from the topology: hash table and parent linkage.
1172 */
7ea21ed1
MD
1173 if (ncp->nc_flag & NCF_HASHED) {
1174 ncp->nc_flag &= ~NCF_HASHED;
1175 LIST_REMOVE(ncp, nc_hash);
1176 }
7ea21ed1
MD
1177 if ((par = ncp->nc_parent) != NULL) {
1178 par = cache_hold(par);
1179 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
67773eb3 1180 ncp->nc_parent = NULL;
7ea21ed1
MD
1181 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
1182 vdrop(par->nc_vp);
1183 }
67773eb3
MD
1184
1185 /*
1186 * ncp should not have picked up any refs. Physically
1187 * destroy the ncp.
1188 */
1189 KKASSERT(ncp->nc_refs == 1);
f517a1bb 1190 --numunres;
67773eb3 1191 /* cache_unlock(ncp) not required */
7ea21ed1 1192 ncp->nc_refs = -1; /* safety */
7ea21ed1
MD
1193 if (ncp->nc_name)
1194 free(ncp->nc_name, M_VFSCACHE);
1195 free(ncp, M_VFSCACHE);
67773eb3
MD
1196
1197 /*
1198 * Loop on the parent (it may be NULL). Only bother looping
1199 * if the parent has a single ref (ours), which also means
1200 * we can lock it trivially.
1201 */
1202 ncp = par;
1203 if (ncp == NULL)
1204 return;
1205 if (ncp->nc_refs != 1) {
1206 cache_drop(ncp);
8c361dda 1207 return;
67773eb3
MD
1208 }
1209 KKASSERT(par->nc_exlocks == 0);
1210 cache_lock(ncp);
7ea21ed1
MD
1211 }
1212done:
67773eb3 1213 cache_unlock(ncp);
7ea21ed1 1214 --ncp->nc_refs;
984263bc
MD
1215}
1216
62d0f1f0
MD
1217static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;
1218
1219static __inline
1220void
1221cache_hysteresis(void)
1222{
1223 /*
1224 * Don't cache too many negative hits. We use hysteresis to reduce
1225 * the impact on the critical path.
1226 */
1227 switch(cache_hysteresis_state) {
1228 case CHI_LOW:
1229 if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
1230 cache_cleanneg(10);
1231 cache_hysteresis_state = CHI_HIGH;
1232 }
1233 break;
1234 case CHI_HIGH:
1235 if (numneg > MINNEG * 9 / 10 &&
1236 numneg * ncnegfactor * 9 / 10 > numcache
1237 ) {
1238 cache_cleanneg(10);
1239 } else {
1240 cache_hysteresis_state = CHI_LOW;
1241 }
1242 break;
1243 }
1244}
1245
14c92d03
MD
1246/*
1247 * NEW NAMECACHE LOOKUP API
1248 *
1249 * Lookup an entry in the cache. A locked, referenced, non-NULL
1250 * entry is *always* returned, even if the supplied component is illegal.
fad57d0e 1251 * The resulting namecache entry should be returned to the system with
14c92d03
MD
1252 * cache_put() or cache_unlock() + cache_drop().
1253 *
1254 * namecache locks are recursive but care must be taken to avoid lock order
1255 * reversals.
1256 *
1257 * Nobody else will be able to manipulate the associated namespace (e.g.
1258 * create, delete, rename, rename-target) until the caller unlocks the
1259 * entry.
1260 *
1261 * The returned entry will be in one of three states: positive hit (non-null
1262 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
1263 * Unresolved entries must be resolved through the filesystem to associate the
1264 * vnode and/or determine whether a positive or negative hit has occured.
1265 *
1266 * It is not necessary to lock a directory in order to lock namespace under
1267 * that directory. In fact, it is explicitly not allowed to do that. A
1268 * directory is typically only locked when being created, renamed, or
1269 * destroyed.
1270 *
1271 * The directory (par) may be unresolved, in which case any returned child
1272 * will likely also be marked unresolved. Likely but not guarenteed. Since
fad57d0e
MD
1273 * the filesystem lookup requires a resolved directory vnode the caller is
1274 * responsible for resolving the namecache chain top-down. This API
14c92d03
MD
1275 * specifically allows whole chains to be created in an unresolved state.
1276 */
1277struct namecache *
690a3127 1278cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
14c92d03 1279{
690a3127
MD
1280 struct namecache *ncp;
1281 struct namecache *new_ncp;
1282 struct nchashhead *nchpp;
1283 u_int32_t hash;
1284 globaldata_t gd;
1285
1286 numcalls++;
1287 gd = mycpu;
1288
690a3127
MD
1289 /*
1290 * Try to locate an existing entry
1291 */
1292 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
1293 hash = fnv_32_buf(&par, sizeof(par), hash);
1294 new_ncp = NULL;
1295restart:
1296 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1297 numchecks++;
1298
1299 /*
1300 * Zap entries that have timed out.
1301 */
1302 if (ncp->nc_timeout &&
67773eb3
MD
1303 (int)(ncp->nc_timeout - ticks) < 0 &&
1304 (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
1305 ncp->nc_exlocks == 0
690a3127 1306 ) {
67773eb3 1307 cache_zap(cache_get(ncp));
690a3127
MD
1308 goto restart;
1309 }
1310
1311 /*
1312 * Break out if we find a matching entry. Note that
e09206ba
MD
1313 * UNRESOLVED entries may match, but DESTROYED entries
1314 * do not.
690a3127
MD
1315 */
1316 if (ncp->nc_parent == par &&
1317 ncp->nc_nlen == nlc->nlc_namelen &&
e09206ba
MD
1318 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
1319 (ncp->nc_flag & NCF_DESTROYED) == 0
690a3127 1320 ) {
67773eb3
MD
1321 if (cache_get_nonblock(ncp) == 0) {
1322 if (new_ncp)
1323 cache_free(new_ncp);
1324 goto found;
1325 }
8c361dda 1326 cache_get(ncp);
67773eb3
MD
1327 cache_put(ncp);
1328 goto restart;
690a3127
MD
1329 }
1330 }
1331
1332 /*
1333 * We failed to locate an entry, create a new entry and add it to
1334 * the cache. We have to relookup after possibly blocking in
1335 * malloc.
1336 */
1337 if (new_ncp == NULL) {
524c845c 1338 new_ncp = cache_alloc(nlc->nlc_namelen);
690a3127
MD
1339 goto restart;
1340 }
1341
1342 ncp = new_ncp;
1343
1344 /*
1345 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
fad57d0e
MD
1346 * and link to the parent. The mount point is usually inherited
1347 * from the parent unless this is a special case such as a mount
1348 * point where nlc_namelen is 0. The caller is responsible for
1349 * setting nc_mount in that case. If nlc_namelen is 0 nc_name will
1350 * be NULL.
690a3127 1351 */
4fcb1cf7
MD
1352 if (nlc->nlc_namelen) {
1353 bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
fad57d0e 1354 ncp->nc_name[nlc->nlc_namelen] = 0;
4fcb1cf7
MD
1355 ncp->nc_mount = par->nc_mount;
1356 }
690a3127
MD
1357 nchpp = NCHHASH(hash);
1358 LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
1359 ncp->nc_flag |= NCF_HASHED;
690a3127 1360 cache_link_parent(ncp, par);
690a3127 1361found:
fad57d0e
MD
1362 /*
1363 * stats and namecache size management
1364 */
1365 if (ncp->nc_flag & NCF_UNRESOLVED)
1366 ++gd->gd_nchstats->ncs_miss;
1367 else if (ncp->nc_vp)
1368 ++gd->gd_nchstats->ncs_goodhits;
1369 else
1370 ++gd->gd_nchstats->ncs_neghits;
62d0f1f0 1371 cache_hysteresis();
690a3127
MD
1372 return(ncp);
1373}
1374
1375/*
21739618 1376 * Resolve an unresolved namecache entry, generally by looking it up.
67773eb3 1377 * The passed ncp must be locked and refd.
21739618
MD
1378 *
1379 * Theoretically since a vnode cannot be recycled while held, and since
1380 * the nc_parent chain holds its vnode as long as children exist, the
1381 * direct parent of the cache entry we are trying to resolve should
1382 * have a valid vnode. If not then generate an error that we can
1383 * determine is related to a resolver bug.
fad57d0e
MD
1384 *
1385 * Note that successful resolution does not necessarily return an error
1386 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
1387 * will be returned.
690a3127
MD
1388 */
1389int
21739618 1390cache_resolve(struct namecache *ncp, struct ucred *cred)
690a3127 1391{
21739618 1392 struct namecache *par;
67773eb3 1393 int error;
8e005a45 1394
67773eb3 1395restart:
8e005a45
MD
1396 /*
1397 * If the ncp is already resolved we have nothing to do.
1398 */
1399 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1400 return (ncp->nc_error);
21739618 1401
646a1cda
MD
1402 /*
1403 * Mount points need special handling because the parent does not
1404 * belong to the same filesystem as the ncp.
1405 */
8e005a45 1406 if (ncp->nc_flag & NCF_MOUNTPT)
646a1cda 1407 return (cache_resolve_mp(ncp));
646a1cda
MD
1408
1409 /*
1410 * We expect an unbroken chain of ncps to at least the mount point,
1411 * and even all the way to root (but this code doesn't have to go
1412 * past the mount point).
1413 */
1414 if (ncp->nc_parent == NULL) {
8e005a45 1415 printf("EXDEV case 1 %p %*.*s\n", ncp,
646a1cda 1416 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
21739618 1417 ncp->nc_error = EXDEV;
646a1cda
MD
1418 return(ncp->nc_error);
1419 }
1420
1421 /*
1422 * The vp's of the parent directories in the chain are held via vhold()
1423 * due to the existance of the child, and should not disappear.
1424 * However, there are cases where they can disappear:
1425 *
1426 * - due to filesystem I/O errors.
1427 * - due to NFS being stupid about tracking the namespace and
1428 * destroys the namespace for entire directories quite often.
1429 * - due to forced unmounts.
e09206ba 1430 * - due to an rmdir (parent will be marked DESTROYED)
646a1cda
MD
1431 *
1432 * When this occurs we have to track the chain backwards and resolve
1433 * it, looping until the resolver catches up to the current node. We
1434 * could recurse here but we might run ourselves out of kernel stack
1435 * so we do it in a more painful manner. This situation really should
1436 * not occur all that often, or if it does not have to go back too
1437 * many nodes to resolve the ncp.
1438 */
1439 while (ncp->nc_parent->nc_vp == NULL) {
e09206ba
MD
1440 /*
1441 * This case can occur if a process is CD'd into a
1442 * directory which is then rmdir'd. If the parent is marked
1443 * destroyed there is no point trying to resolve it.
1444 */
1445 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
1446 return(ENOENT);
1447
646a1cda
MD
1448 par = ncp->nc_parent;
1449 while (par->nc_parent && par->nc_parent->nc_vp == NULL)
1450 par = par->nc_parent;
1451 if (par->nc_parent == NULL) {
1452 printf("EXDEV case 2 %*.*s\n",
1453 par->nc_nlen, par->nc_nlen, par->nc_name);
1454 return (EXDEV);
1455 }
1456 printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
1457 par->nc_nlen, par->nc_nlen, par->nc_name);
1458 /*
67773eb3
MD
1459 * The parent is not set in stone, ref and lock it to prevent
1460 * it from disappearing. Also note that due to renames it
1461 * is possible for our ncp to move and for par to no longer
1462 * be one of its parents. We resolve it anyway, the loop
1463 * will handle any moves.
646a1cda
MD
1464 */
1465 cache_get(par);
1466 if (par->nc_flag & NCF_MOUNTPT) {
1467 cache_resolve_mp(par);
8e005a45
MD
1468 } else if (par->nc_parent->nc_vp == NULL) {
1469 printf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
1470 cache_put(par);
1471 continue;
fad57d0e
MD
1472 } else if (par->nc_flag & NCF_UNRESOLVED) {
1473 par->nc_error = VOP_NRESOLVE(par, cred);
646a1cda 1474 }
67773eb3
MD
1475 if ((error = par->nc_error) != 0) {
1476 if (par->nc_error != EAGAIN) {
1477 printf("EXDEV case 3 %*.*s error %d\n",
1478 par->nc_nlen, par->nc_nlen, par->nc_name,
1479 par->nc_error);
1480 cache_put(par);
1481 return(error);
1482 }
1483 printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
1484 par, par->nc_nlen, par->nc_nlen, par->nc_name);
646a1cda 1485 }
67773eb3
MD
1486 cache_put(par);
1487 /* loop */
646a1cda 1488 }
8e005a45
MD
1489
1490 /*
fad57d0e 1491 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
8e005a45
MD
1492 * ncp's and reattach them. If this occurs the original ncp is marked
1493 * EAGAIN to force a relookup.
fad57d0e
MD
1494 *
1495 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
1496 * ncp must already be resolved.
8e005a45
MD
1497 */
1498 KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
fad57d0e 1499 ncp->nc_error = VOP_NRESOLVE(ncp, cred);
6ddb7618 1500 /*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
67773eb3
MD
1501 if (ncp->nc_error == EAGAIN) {
1502 printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
1503 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
1504 goto restart;
1505 }
646a1cda
MD
1506 return(ncp->nc_error);
1507}
1508
1509/*
1510 * Resolve the ncp associated with a mount point. Such ncp's almost always
1511 * remain resolved and this routine is rarely called. NFS MPs tends to force
1512 * re-resolution more often due to its mac-truck-smash-the-namecache
1513 * method of tracking namespace changes.
1514 *
6215aa92
MD
1515 * The semantics for this call is that the passed ncp must be locked on
1516 * entry and will be locked on return. However, if we actually have to
1517 * resolve the mount point we temporarily unlock the entry in order to
1518 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
1519 * the unlock we have to recheck the flags after we relock.
646a1cda
MD
1520 */
1521static int
1522cache_resolve_mp(struct namecache *ncp)
1523{
1524 struct vnode *vp;
1525 struct mount *mp = ncp->nc_mount;
6215aa92 1526 int error;
646a1cda
MD
1527
1528 KKASSERT(mp != NULL);
1529 if (ncp->nc_flag & NCF_UNRESOLVED) {
6215aa92 1530 cache_unlock(ncp);
861905fb 1531 while (vfs_busy(mp, 0, curthread))
646a1cda 1532 ;
6215aa92
MD
1533 error = VFS_ROOT(mp, &vp);
1534 cache_lock(ncp);
1535
1536 /*
1537 * recheck the ncp state after relocking.
1538 */
1539 if (ncp->nc_flag & NCF_UNRESOLVED) {
1540 ncp->nc_error = error;
1541 if (error == 0) {
1542 cache_setvp(ncp, vp);
1543 vput(vp);
1544 } else {
1545 printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
1546 cache_setvp(ncp, NULL);
1547 }
1548 } else if (error == 0) {
646a1cda 1549 vput(vp);
646a1cda
MD
1550 }
1551 vfs_unbusy(mp, curthread);
21739618
MD
1552 }
1553 return(ncp->nc_error);
14c92d03
MD
1554}
1555
62d0f1f0
MD
1556void
1557cache_cleanneg(int count)
1558{
1559 struct namecache *ncp;
7ea21ed1
MD
1560
1561 /*
62d0f1f0
MD
1562 * Automode from the vnlru proc - clean out 10% of the negative cache
1563 * entries.
7ea21ed1 1564 */
62d0f1f0
MD
1565 if (count == 0)
1566 count = numneg / 10 + 1;
1567
1568 /*
1569 * Attempt to clean out the specified number of negative cache
1570 * entries.
1571 */
1572 while (count) {
7ea21ed1 1573 ncp = TAILQ_FIRST(&ncneglist);
eb82ae62
MD
1574 if (ncp == NULL) {
1575 KKASSERT(numneg == 0);
1576 break;
1577 }
62d0f1f0
MD
1578 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
1579 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
67773eb3
MD
1580 if (cache_get_nonblock(ncp) == 0)
1581 cache_zap(ncp);
62d0f1f0 1582 --count;
984263bc
MD
1583 }
1584}
1585
fad57d0e
MD
1586/*
1587 * Rehash a ncp. Rehashing is typically required if the name changes (should
1588 * not generally occur) or the parent link changes. This function will
1589 * unhash the ncp if the ncp is no longer hashable.
1590 */
8c361dda
MD
1591static void
1592cache_rehash(struct namecache *ncp)
1593{
1594 struct nchashhead *nchpp;
1595 u_int32_t hash;
1596
1597 if (ncp->nc_flag & NCF_HASHED) {
1598 ncp->nc_flag &= ~NCF_HASHED;
1599 LIST_REMOVE(ncp, nc_hash);
1600 }
fad57d0e
MD
1601 if (ncp->nc_nlen && ncp->nc_parent) {
1602 hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
1603 hash = fnv_32_buf(&ncp->nc_parent,
1604 sizeof(ncp->nc_parent), hash);
1605 nchpp = NCHHASH(hash);
1606 LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
1607 ncp->nc_flag |= NCF_HASHED;
1608 }
8c361dda
MD
1609}
1610
984263bc 1611/*
24e51f36 1612 * Name cache initialization, from vfsinit() when we are booting
984263bc
MD
1613 */
1614void
8987aad7 1615nchinit(void)
984263bc 1616{
24e51f36
HP
1617 int i;
1618 globaldata_t gd;
1619
1620 /* initialise per-cpu namecache effectiveness statistics. */
1621 for (i = 0; i < ncpus; ++i) {
1622 gd = globaldata_find(i);
1623 gd->gd_nchstats = &nchstats[i];
1624 }
7ea21ed1 1625 TAILQ_INIT(&ncneglist);
984263bc 1626 nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
fc21741a 1627 nclockwarn = 1 * hz;
21739618
MD
1628}
1629
1630/*
1631 * Called from start_init() to bootstrap the root filesystem. Returns
1632 * a referenced, unlocked namecache record.
1633 */
1634struct namecache *
5fd012e0 1635cache_allocroot(struct mount *mp, struct vnode *vp)
21739618 1636{
524c845c 1637 struct namecache *ncp = cache_alloc(0);
21739618 1638
21739618 1639 ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
5fd012e0 1640 ncp->nc_mount = mp;
8c361dda
MD
1641 cache_setvp(ncp, vp);
1642 return(ncp);
984263bc
MD
1643}
1644
1645/*
7ea21ed1 1646 * vfs_cache_setroot()
984263bc 1647 *
7ea21ed1
MD
1648 * Create an association between the root of our namecache and
1649 * the root vnode. This routine may be called several times during
1650 * booting.
690a3127 1651 *
1652 * The references passed in are consumed (stored in rootvnode/rootncp);
1653 * a caller that intends to keep using ncp must cache_hold() it first.
7ea21ed1 1654 */
21739618 1655 void
1656 vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
7ea21ed1 1657 {
21739618 1658 struct vnode *ovp;
1659 struct namecache *oncp;
1660
1661 ovp = rootvnode;
1662 oncp = rootncp;
1663 rootvnode = nvp;
1664 rootncp = ncp;
1665
1666 if (ovp)
1667 vrele(ovp);
1668 if (oncp)
1669 cache_drop(oncp);
7ea21ed1 1670 }
1671
1672 /*
fad57d0e 1673 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
1674 * topology and is being removed as quickly as possible.  The new VOP_N*()
1675 * API calls are required to make specific adjustments using the supplied
1676 * ncp pointers rather than just bogusly purging random vnodes.
1677 *
7ea21ed1 1678 * Invalidate all namecache entries to a particular vnode as well as
1679 * any direct children of that vnode in the namecache. This is a
1680 * 'catch all' purge used by filesystems that do not know any better.
1681 *
1682 * A new vnode v_id is generated. Note that no vnode will ever have a
1683 * v_id of 0.
1684 *
1685 * Note that the linkage between the vnode and its namecache entries will
1686 * be removed, but the namecache entries themselves might stay put due to
1687 * active references from elsewhere in the system or due to the existence of
1688 * the children. The namecache topology is left intact even if we do not
1689 * know what the vnode association is. Such entries will be marked
1690 * NCF_UNRESOLVED.
984263bc 1691 *
1692 * XXX: Only time and the size of v_id prevents this from failing:
1693 * XXX: In theory we should hunt down all (struct vnode*, v_id)
1694 * XXX: soft references and nuke them, at least on the global
1695 * XXX: v_id wraparound. The period of resistance can be extended
1696 * XXX: by incrementing each vnodes v_id individually instead of
1697 * XXX: using the global v_id.
dc1be39c 1698 *
1699 * Does not support NCP_FSMID accumulation on invalidation (retflags is
1700 * not used).
984263bc 1701 */
984263bc 1702void
8987aad7 1703cache_purge(struct vnode *vp)
984263bc 1704 {
1705 static u_long nextid;
dc1be39c 1706 int retflags = 0;
984263bc 1707
dc1be39c 1708 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN, &retflags);
984263bc 1709
7ea21ed1 1710 /*
1711 * Calculate a new unique id for ".." handling
1712 */
8987aad7 1713 do {
984263bc 1714 nextid++;
7ea21ed1 1715 } while (nextid == vp->v_id || nextid == 0);
984263bc 1716 vp->v_id = nextid;
984263bc 1717 }
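/*
 * The do/while above also guarantees that no vnode ever carries a v_id
 * of 0: if nextid wraps to 0 it is bumped to 1, and if that collides
 * with the vnode's current v_id it is bumped again, so stale
 * (struct vnode *, v_id) soft references always mismatch after a purge.
 */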
1718
1719 /*
1720 * Flush all entries referencing a particular filesystem.
1721 *
1722 * Since we need to check it anyway, we will flush all the invalid
1723 * entries at the same time.
1724 */
1725 void
8987aad7 1726cache_purgevfs(struct mount *mp)
984263bc 1727{
bc0c094e 1728 struct nchashhead *nchpp;
984263bc 1729 struct namecache *ncp, *nnp;
1730
7ea21ed1 1731 /*
1732 * Scan hash tables for applicable entries.
1733 */
bc0c094e 1734 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
1735 ncp = LIST_FIRST(nchpp);
7ea21ed1 1736 if (ncp)
1737 cache_hold(ncp);
1738 while (ncp) {
984263bc 1739 nnp = LIST_NEXT(ncp, nc_hash);
7ea21ed1 1740 if (nnp)
1741 cache_hold(nnp);
4fcb1cf7 1742 if (ncp->nc_mount == mp) {
67773eb3 1743 cache_lock(ncp);
984263bc 1744 cache_zap(ncp);
67773eb3 1745 } else {
7ea21ed1 1746 cache_drop(ncp);
67773eb3 1747 }
7ea21ed1 1748 ncp = nnp;
984263bc 1749 }
1750 }
1751 }
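/*
 * The scan above uses a hold-ahead pattern: the next chain entry nnp is
 * cache_hold()'d before the current entry is zapped or dropped, so the
 * iteration cannot be broken even though cache_zap() may destroy ncp
 * and unlink it from the hash chain being walked.
 */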
1752
984263bc 1753 static int disablecwd;
1754 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");
1755
1756 static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
1757 static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
1758 static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
1759 static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
1760 static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
1761 static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
41c20dac 1762
984263bc 1763int
41c20dac 1764__getcwd(struct __getcwd_args *uap)
63f58b90 1765{
02680f1b 1766 int buflen;
63f58b90 1767 int error;
02680f1b 1768 char *buf;
1769 char *bp;
1770
1771 if (disablecwd)
1772 return (ENODEV);
1773
1774 buflen = uap->buflen;
1775 if (buflen < 2)
1776 return (EINVAL);
1777 if (buflen > MAXPATHLEN)
1778 buflen = MAXPATHLEN;
63f58b90 1779
02680f1b 1780 buf = malloc(buflen, M_TEMP, M_WAITOK);
1781 bp = kern_getcwd(buf, buflen, &error);
63f58b90 1782 if (error == 0)
02680f1b 1783 error = copyout(bp, uap->buf, strlen(bp) + 1);
1784 free(buf, M_TEMP);
63f58b90 1785 return (error);
1786 }
1787
02680f1b 1788 char *
1789 kern_getcwd(char *buf, size_t buflen, int *error)
984263bc 1790{
41c20dac 1791 struct proc *p = curproc;
63f58b90 1792 char *bp;
02680f1b 1793 int i, slash_prefixed;
984263bc 1794 struct filedesc *fdp;
1795 struct namecache *ncp;
984263bc 1796
1797 numcwdcalls++;
63f58b90 1798 bp = buf;
1799 bp += buflen - 1;
984263bc 1800 *bp = '\0';
1801 fdp = p->p_fd;
1802 slash_prefixed = 0;
524c845c 1803
1804 ncp = fdp->fd_ncdir;
1805 while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
1806 if (ncp->nc_flag & NCF_MOUNTPT) {
1807 if (ncp->nc_mount == NULL) {
1808 *error = EBADF; /* forced unmount? */
02680f1b 1809 return(NULL);
984263bc 1810 }
524c845c 1811 ncp = ncp->nc_parent;
984263bc 1812 continue;
1813 }
984263bc 1814 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
1815 if (bp == buf) {
1816 numcwdfail4++;
02680f1b 1817 *error = ENOMEM;
1818 return(NULL);
984263bc 1819 }
1820 *--bp = ncp->nc_name[i];
1821 }
1822 if (bp == buf) {
1823 numcwdfail4++;
02680f1b 1824 *error = ENOMEM;
1825 return(NULL);
984263bc 1826 }
1827 *--bp = '/';
1828 slash_prefixed = 1;
524c845c 1829 ncp = ncp->nc_parent;
1830 }
1831 if (ncp == NULL) {
1832 numcwdfail2++;
1833 *error = ENOENT;
1834 return(NULL);
984263bc
MD
1835 }
1836 if (!slash_prefixed) {
1837 if (bp == buf) {
1838 numcwdfail4++;
02680f1b
MD
1839 *error = ENOMEM;
1840 return(NULL);
984263bc
MD
1841 }
1842 *--bp = '/';
1843 }
1844 numcwdfound++;
02680f1b 1845 *error = 0;
1846 return (bp);
984263bc 1847 }
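/*
 * In-kernel usage sketch (this mirrors __getcwd() above): the path is
 * built backwards from the end of the buffer, so the returned pointer
 * lands somewhere inside buf and buf itself is what must be freed:
 *
 *	char *buf, *bp;
 *	int error;
 *
 *	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
 *	bp = kern_getcwd(buf, MAXPATHLEN, &error);
 *	if (error == 0)
 *		printf("cwd: %s\n", bp);
 *	free(buf, M_TEMP);
 */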
1848
1849 /*
1850 * Thus begins the fullpath magic.
1851 */
1852
1853 #undef STATNODE
1854 #define STATNODE(name) \
1855 static u_int name; \
1856 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
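/*
 * For reference, STATNODE(numfullpathcalls) below expands to
 *
 *	static u_int numfullpathcalls;
 *	SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathcalls, CTLFLAG_RD,
 *	    &numfullpathcalls, 0, "");
 *
 * publishing the counter as the read-only sysctl
 * vfs.cache.numfullpathcalls.
 */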
1857
1858 static int disablefullpath;
1859 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
1860 &disablefullpath, 0, "");
1861
1862 STATNODE(numfullpathcalls);
1863 STATNODE(numfullpathfail1);
1864 STATNODE(numfullpathfail2);
1865 STATNODE(numfullpathfail3);
1866 STATNODE(numfullpathfail4);
1867 STATNODE(numfullpathfound);
1868
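/*
 * Construct the full path for the given ncp by walking nc_parent links
 * back toward the root (stopping at p's root directory when p is
 * non-NULL), assembling the name backwards from the tail of a
 * MAXPATHLEN temporary buffer.  On success *retbuf points into the
 * buffer returned in *freebuf; the caller frees *freebuf, not *retbuf.
 */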
1869 int
b6372d22 1870cache_fullpath(struct proc *p, struct namecache *ncp, char **retbuf, char **freebuf)
8987aad7 1871{
984263bc 1872 char *bp, *buf;
1873 int i, slash_prefixed;
75ffff0d 1874 struct namecache *fd_nrdir;
984263bc 1875
b6372d22 1876 numfullpathcalls++;
b310dfc4 1877
984263bc
MD
1878 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1879 bp = buf + MAXPATHLEN - 1;
1880 *bp = '\0';
75ffff0d 1881 if (p != NULL)
1882 fd_nrdir = p->p_fd->fd_nrdir;
1883 else
1884 fd_nrdir = NULL;
984263bc 1885 slash_prefixed = 0;
75ffff0d 1886 while (ncp && ncp != fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
524c845c 1887 if (ncp->nc_flag & NCF_MOUNTPT) {
1888 if (ncp->nc_mount == NULL) {
984263bc 1889 free(buf, M_TEMP);
524c845c 1890 return(EBADF);
984263bc 1891 }
524c845c 1892 ncp = ncp->nc_parent;
984263bc 1893 continue;
1894 }
984263bc 1895 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
1896 if (bp == buf) {
1897 numfullpathfail4++;
1898 free(buf, M_TEMP);
b6372d22 1899 return(ENOMEM);
984263bc 1900 }
1901 *--bp = ncp->nc_name[i];
1902 }
1903 if (bp == buf) {
1904 numfullpathfail4++;
1905 free(buf, M_TEMP);
b6372d22 1906 return(ENOMEM);
984263bc 1907 }
1908 *--bp = '/';
1909 slash_prefixed = 1;
524c845c 1910 ncp = ncp->nc_parent;
1911 }
1912 if (ncp == NULL) {
1913 numfullpathfail2++;
1914 free(buf, M_TEMP);
b6372d22 1915 return(ENOENT);
984263bc 1916 }
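	/*
	 * If the walk reached the global root without passing through
	 * the process's own root directory (e.g. an ncp outside a
	 * chroot), no meaningful relative path exists; reset the buffer
	 * so that just "/" is returned.
	 */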
75ffff0d 1917 if (p != NULL && (ncp->nc_flag & NCF_ROOT) && ncp != fd_nrdir) {
872b00c0 1918 bp = buf + MAXPATHLEN - 1;
1919 *bp = '\0';
1920 slash_prefixed = 0;
1921 }
984263bc 1922 if (!slash_prefixed) {
1923 if (bp == buf) {
1924 numfullpathfail4++;
1925 free(buf, M_TEMP);
b6372d22 1926 return(ENOMEM);
984263bc 1927 }
1928 *--bp = '/';
1929 }
1930 numfullpathfound++;
1931 *retbuf = bp;
b310dfc4 1932 *freebuf = buf;
6a506bad 1933
1934 return(0);
984263bc 1935 }
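/*
 * Caller sketch for the *retbuf/*freebuf contract:
 *
 *	char *path, *fbuf;
 *
 *	if (cache_fullpath(p, ncp, &path, &fbuf) == 0) {
 *		printf("%s\n", path);
 *		free(fbuf, M_TEMP);	// frees path as well
 *	}
 */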
8987aad7 1936
b6372d22 1937 int
1938 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
1939 {
b6372d22 1940 struct namecache *ncp;
1941
1942 numfullpathcalls++;
1943 if (disablefullpath)
1944 return (ENODEV);
1945
1946 if (p == NULL)
1947 return (EINVAL);
1948
1949 /* A NULL vn means the caller wants us to use p->p_textvp */
1950 if (vn == NULL) {
1951 if ((vn = p->p_textvp) == NULL)
1952 return (EINVAL);
1953 }
1954 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
1955 if (ncp->nc_nlen)
1956 break;
1957 }
1958 if (ncp == NULL)
1959 return (EINVAL);
1960
1961 numfullpathcalls--;
1962 return(cache_fullpath(p, ncp, retbuf, freebuf));
1963}