1/*
2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
36 *
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
69 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
70 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.85 2007/11/02 19:52:25 dillon Exp $
71 */
72
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/kernel.h>
76#include <sys/sysctl.h>
77#include <sys/mount.h>
78#include <sys/vnode.h>
79#include <sys/malloc.h>
80#include <sys/sysproto.h>
81#include <sys/proc.h>
82#include <sys/namei.h>
83#include <sys/nlookup.h>
84#include <sys/filedesc.h>
85#include <sys/fnv_hash.h>
86#include <sys/globaldata.h>
87#include <sys/kern_syscall.h>
88#include <sys/dirent.h>
89#include <ddb/ddb.h>
90
91#include <sys/sysref2.h>
92
93#define MAX_RECURSION_DEPTH 64
94
95/*
96 * Random lookups in the cache are accomplished with a hash table using
97 * a hash key of (parent ncp, name).
98 *
99 * Negative entries may exist and correspond to structures where nc_vp
100 * is NULL. In a negative entry, NCF_WHITEOUT will be set if the entry
101 * corresponds to a whited-out directory entry (versus simply not finding the
102 * entry at all).
103 *
104 * Upon reaching the last segment of a path, if the reference is for DELETE,
105 * or NOCACHE is set (rewrite), and the name is located in the cache, it
106 * will be dropped.
107 */
108
109/*
110 * Structures associated with name caching.
111 */
112#define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
113#define MINNEG 1024
114
115MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
116
117static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */
118static struct namecache_list ncneglist; /* instead of vnode */
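/*
 * Illustrative sketch (not compiled; example_hash_chain is hypothetical):
 * how a lookup key is formed.  The hash mixes the component name with the
 * address of the parent ncp, and NCHHASH() masks it down to a chain head.
 * This mirrors the real code in cache_nlookup() below.
 */
#if 0
static struct nchashhead *
example_hash_chain(struct namecache *par, const char *name, int namelen)
{
	u_int32_t hash;

	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);	/* hash the pointer */
	return (NCHHASH(hash));
}
#endif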
119
120/*
121 * ncvp_debug - debug cache_fromdvp(). This is used by the NFS server
122 * to create the namecache infrastructure leading to a dangling vnode.
123 *
124 * 0 Only errors are reported
125 * 1 Successes are reported
126 * 2 Successes + the whole directory scan is reported
127 * 3 Force the directory scan code to run as if the parent vnode did not
128 * have a namecache record, even if it does have one.
129 */
130static int ncvp_debug;
131SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");
132
133static u_long nchash; /* size of hash table */
134SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
135
136static u_long ncnegfactor = 16; /* ratio of negative entries */
137SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
138
139static int nclockwarn; /* warn on locked entries in ticks */
140SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");
141
142static u_long numneg; /* number of negative cache entries */
143SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
144
145static u_long numcache; /* number of cache entries allocated */
146SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
147
148static u_long numunres; /* number of unresolved entries */
149SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");
150
151SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
152SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
153
154static int cache_resolve_mp(struct mount *mp);
155static void _cache_rehash(struct namecache *ncp);
156static void _cache_lock(struct namecache *ncp);
157static void _cache_setunresolved(struct namecache *ncp);
158
159/*
160 * The new name cache statistics
161 */
162SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
163#define STATNODE(mode, name, var) \
164 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
165STATNODE(CTLFLAG_RD, numneg, &numneg);
166STATNODE(CTLFLAG_RD, numcache, &numcache);
167static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
168static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
169static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
170static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
171static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
172static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
173static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
174static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
175static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
176static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
177
178struct nchstats nchstats[SMP_MAXCPU];
179/*
180 * Export VFS cache effectiveness statistics to user-land.
181 *
182 * The statistics are left for aggregation to user-land so
183 * neat things can be achieved, like observing per-CPU cache
184 * distribution.
185 */
186static int
187sysctl_nchstats(SYSCTL_HANDLER_ARGS)
188{
189 struct globaldata *gd;
190 int i, error;
191
192 error = 0;
193 for (i = 0; i < ncpus; ++i) {
194 gd = globaldata_find(i);
195 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
196 sizeof(struct nchstats))))
197 break;
198 }
199
200 return (error);
201}
202SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
203 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
204
205static void cache_zap(struct namecache *ncp);
206
207/*
208 * cache_hold() and cache_drop() prevent the premature deletion of a
209 * namecache entry but do not prevent operations (such as zapping) on
210 * that namecache entry.
211 *
212 * This routine may only be called from outside this source module if
213 * nc_refs is already at least 1.
214 *
215 * This is a rare case where callers are allowed to hold a spinlock,
216 * so we can't take one ourselves.
217 */
218static __inline
219struct namecache *
220_cache_hold(struct namecache *ncp)
221{
222 atomic_add_int(&ncp->nc_refs, 1);
223 return(ncp);
224}
225
226/*
227 * When dropping an entry, if only one ref remains and the entry has not
228 * been resolved, zap it. Since the one reference is being dropped the
229 * entry had better not be locked.
230 */
231static __inline
232void
233_cache_drop(struct namecache *ncp)
234{
235 KKASSERT(ncp->nc_refs > 0);
236 if (ncp->nc_refs == 1 &&
237 (ncp->nc_flag & NCF_UNRESOLVED) &&
238 TAILQ_EMPTY(&ncp->nc_list)
239 ) {
240 KKASSERT(ncp->nc_exlocks == 0);
241 _cache_lock(ncp);
242 cache_zap(ncp);
243 } else {
244 atomic_subtract_int(&ncp->nc_refs, 1);
245 }
246}
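/*
 * Illustrative sketch (not compiled; example_hold_drop is hypothetical):
 * the hold/drop pairing.  A hold only pins the ncp against deletion; it
 * does not lock it or stabilize its namespace.  The final drop of an
 * unresolved, childless entry zaps it.
 */
#if 0
static void
example_hold_drop(struct namecache *ncp)
{
	ncp = _cache_hold(ncp);	/* +1 ref, never blocks */
	/* ... may inspect ncp, may not resolve/unresolve it ... */
	_cache_drop(ncp);	/* -1 ref, zaps if last ref and unresolved */
}
#endif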
247
248/*
249 * Link a new namecache entry to its parent. Be careful to avoid races
250 * if vhold() blocks in the future.
251 */
252static void
253cache_link_parent(struct namecache *ncp, struct namecache *par)
254{
255 KKASSERT(ncp->nc_parent == NULL);
256 ncp->nc_parent = par;
257 if (TAILQ_EMPTY(&par->nc_list)) {
258 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
259 /*
260 * Any vp associated with an ncp which has children must
261 * be held to prevent it from being recycled.
262 */
263 if (par->nc_vp)
264 vhold(par->nc_vp);
265 } else {
266 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
267 }
268}
269
270/*
271 * Remove the parent association from a namecache structure. If this is
272 * the last child of the parent, cache_drop(par) will attempt to
273 * recursively zap the parent.
274 */
275static void
276cache_unlink_parent(struct namecache *ncp)
277{
278 struct namecache *par;
279
280 if ((par = ncp->nc_parent) != NULL) {
281 ncp->nc_parent = NULL;
282 par = _cache_hold(par);
283 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
284 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
285 vdrop(par->nc_vp);
286 _cache_drop(par);
287 }
288}
289
290/*
291 * Allocate a new namecache structure. Most of the code does not require
292 * zero-termination of the string but it makes vop_compat_ncreate() easier.
293 */
294static struct namecache *
295cache_alloc(int nlen)
296{
297 struct namecache *ncp;
298
299 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
300 if (nlen)
301 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
302 ncp->nc_nlen = nlen;
303 ncp->nc_flag = NCF_UNRESOLVED;
304 ncp->nc_error = ENOTCONN; /* needs to be resolved */
305 ncp->nc_refs = 1;
306
307 /*
308 * Construct a fake FSMID based on the time of day and a 32 bit
309 * roller for uniqueness. This is used to generate a useful
310 * FSMID for filesystems which do not support it.
311 */
312 ncp->nc_fsmid = cache_getnewfsmid();
313 TAILQ_INIT(&ncp->nc_list);
314 _cache_lock(ncp);
315 return(ncp);
316}
317
318static void
319_cache_free(struct namecache *ncp)
320{
321 KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
322 if (ncp->nc_name)
323 kfree(ncp->nc_name, M_VFSCACHE);
324 kfree(ncp, M_VFSCACHE);
325}
326
327void
328cache_zero(struct nchandle *nch)
329{
330 nch->ncp = NULL;
331 nch->mount = NULL;
332}
333
334/*
335 * Ref and deref a namecache structure.
336 *
337 * Warning: caller may hold an unrelated read spinlock, which means we can't
338 * use read spinlocks here.
339 */
340struct nchandle *
341cache_hold(struct nchandle *nch)
342{
343 _cache_hold(nch->ncp);
344 ++nch->mount->mnt_refs;
345 return(nch);
346}
347
348void
349cache_copy(struct nchandle *nch, struct nchandle *target)
350{
351 *target = *nch;
352 _cache_hold(target->ncp);
353 ++nch->mount->mnt_refs;
354}
355
356void
357cache_changemount(struct nchandle *nch, struct mount *mp)
358{
359 --nch->mount->mnt_refs;
360 nch->mount = mp;
361 ++nch->mount->mnt_refs;
362}
363
364void
365cache_drop(struct nchandle *nch)
366{
367 --nch->mount->mnt_refs;
368 _cache_drop(nch->ncp);
369 nch->ncp = NULL;
370 nch->mount = NULL;
371}
372
373/*
374 * Namespace locking. The caller must already hold a reference to the
375 * namecache structure in order to lock/unlock it. This function prevents
376 * the namespace from being created or destroyed by accessors other than
377 * the lock holder.
378 *
379 * Note that holding a locked namecache structure prevents other threads
380 * from making namespace changes (e.g. deleting or creating), prevents
381 * vnode association state changes by other threads, and prevents the
382 * namecache entry from being resolved or unresolved by other threads.
383 *
384 * The lock owner has full authority to associate/disassociate vnodes
385 * and resolve/unresolve the locked ncp.
386 *
387 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
388 * or recycled, but it does NOT help you if the vnode had already initiated
389 * recycling. If this is important, use cache_get() rather than
390 * cache_lock() (and deal with the differences in the way the refs counter
391 * is handled). Or, alternatively, make an unconditional call to
392 * cache_validate() or cache_resolve() after cache_lock() returns.
393 */
394static
395void
396_cache_lock(struct namecache *ncp)
397{
398 thread_t td;
399 int didwarn;
400
401 KKASSERT(ncp->nc_refs != 0);
402 didwarn = 0;
403 td = curthread;
404
405 for (;;) {
406 if (ncp->nc_exlocks == 0) {
407 ncp->nc_exlocks = 1;
408 ncp->nc_locktd = td;
409 /*
410 * The vp associated with a locked ncp must be held
411 * to prevent it from being recycled (which would
412 * cause the ncp to become unresolved).
413 *
414 * WARNING! If VRECLAIMED is set the vnode could
415 * already be in the middle of a recycle. Callers
416 * should not assume that nc_vp is usable when
417 * not NULL. cache_vref() or cache_vget() must be
418 * called.
419 *
420 * XXX loop on race for later MPSAFE work.
421 */
422 if (ncp->nc_vp)
423 vhold(ncp->nc_vp);
424 break;
425 }
426 if (ncp->nc_locktd == td) {
427 ++ncp->nc_exlocks;
428 break;
429 }
430 ncp->nc_flag |= NCF_LOCKREQ;
431 if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
432 if (didwarn)
433 continue;
434 didwarn = 1;
435 kprintf("[diagnostic] cache_lock: blocked on %p", ncp);
436 kprintf(" \"%*.*s\"\n",
437 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
438 }
439 }
440
441 if (didwarn == 1) {
442 kprintf("[diagnostic] cache_lock: unblocked %*.*s\n",
443 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
444 }
445}
446
447void
448cache_lock(struct nchandle *nch)
449{
450 _cache_lock(nch->ncp);
451}
452
453static
454int
455_cache_lock_nonblock(struct namecache *ncp)
456{
457 thread_t td;
458
459 KKASSERT(ncp->nc_refs != 0);
460 td = curthread;
461 if (ncp->nc_exlocks == 0) {
462 ncp->nc_exlocks = 1;
463 ncp->nc_locktd = td;
464 /*
465 * The vp associated with a locked ncp must be held
466 * to prevent it from being recycled (which would
467 * cause the ncp to become unresolved).
468 *
469 * WARNING! If VRECLAIMED is set the vnode could
470 * already be in the middle of a recycle. Callers
471 * should not assume that nc_vp is usable when
472 * not NULL. cache_vref() or cache_vget() must be
473 * called.
474 *
475 * XXX loop on race for later MPSAFE work.
476 */
477 if (ncp->nc_vp)
478 vhold(ncp->nc_vp);
479 return(0);
480 } else {
481 return(EWOULDBLOCK);
482 }
483}
484
485int
486cache_lock_nonblock(struct nchandle *nch)
487{
488 return(_cache_lock_nonblock(nch->ncp));
489}
490
491static
492void
493_cache_unlock(struct namecache *ncp)
494{
495 thread_t td = curthread;
496
497 KKASSERT(ncp->nc_refs > 0);
498 KKASSERT(ncp->nc_exlocks > 0);
499 KKASSERT(ncp->nc_locktd == td);
500 if (--ncp->nc_exlocks == 0) {
501 if (ncp->nc_vp)
502 vdrop(ncp->nc_vp);
503 ncp->nc_locktd = NULL;
504 if (ncp->nc_flag & NCF_LOCKREQ) {
505 ncp->nc_flag &= ~NCF_LOCKREQ;
506 wakeup(ncp);
507 }
508 }
509}
510
511void
512cache_unlock(struct nchandle *nch)
513{
514 _cache_unlock(nch->ncp);
515}
516
517/*
518 * ref-and-lock, unlock-and-deref functions.
519 *
520 * This function is primarily used by nlookup. Even though cache_lock
521 * holds the vnode, it is possible that the vnode may have already
522 * initiated a recyclement. We want cache_get() to return a definitively
523 * usable vnode or a definitively unresolved ncp.
524 */
525static
526struct namecache *
527_cache_get(struct namecache *ncp)
528{
529 _cache_hold(ncp);
530 _cache_lock(ncp);
531 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
532 _cache_setunresolved(ncp);
533 return(ncp);
534}
535
536/*
537 * note: the same nchandle can be passed for both arguments.
538 */
539void
540cache_get(struct nchandle *nch, struct nchandle *target)
541{
542 target->mount = nch->mount;
543 target->ncp = _cache_get(nch->ncp);
544 ++target->mount->mnt_refs;
545}
546
547static int
548_cache_get_nonblock(struct namecache *ncp)
549{
550 /* XXX MP */
551 if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
552 _cache_hold(ncp);
553 _cache_lock(ncp);
554 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
555 _cache_setunresolved(ncp);
556 return(0);
557 }
558 return(EWOULDBLOCK);
559}
560
561int
562cache_get_nonblock(struct nchandle *nch)
563{
564 return(_cache_get_nonblock(nch->ncp));
565}
566
567static __inline
568void
569_cache_put(struct namecache *ncp)
570{
571 _cache_unlock(ncp);
572 _cache_drop(ncp);
573}
574
575void
576cache_put(struct nchandle *nch)
577{
578 --nch->mount->mnt_refs;
579 _cache_put(nch->ncp);
580 nch->ncp = NULL;
581 nch->mount = NULL;
582}
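/*
 * Illustrative sketch (not compiled; example_get_put is hypothetical):
 * the ref-and-lock pairing used by consumers such as nlookup.
 * cache_get() fills in the target nchandle referenced and locked,
 * unresolving it if the vnode raced a reclaim; cache_put() undoes both.
 */
#if 0
static void
example_get_put(struct nchandle *nch)
{
	struct nchandle tmp;

	cache_get(nch, &tmp);	/* ref + lock, revalidated against reclaim */
	/* ... the namespace under tmp.ncp is now stable ... */
	cache_put(&tmp);	/* unlock + drop */
}
#endif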
583
584/*
585 * Resolve an unresolved ncp by associating a vnode with it. If the
586 * vnode is NULL, a negative cache entry is created.
587 *
588 * The ncp should be locked on entry and will remain locked on return.
589 */
590static
591void
592_cache_setvp(struct namecache *ncp, struct vnode *vp)
593{
594 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
595 ncp->nc_vp = vp;
596 if (vp != NULL) {
597 /*
598 * Any vp associated with an ncp which has children must
599 * be held. Any vp associated with a locked ncp must be held.
600 */
601 if (!TAILQ_EMPTY(&ncp->nc_list))
602 vhold(vp);
603 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
604 if (ncp->nc_exlocks)
605 vhold(vp);
606
607 /*
608 * Set auxiliary flags
609 */
610 switch(vp->v_type) {
611 case VDIR:
612 ncp->nc_flag |= NCF_ISDIR;
613 break;
614 case VLNK:
615 ncp->nc_flag |= NCF_ISSYMLINK;
616 /* XXX cache the contents of the symlink */
617 break;
618 default:
619 break;
620 }
621 ++numcache;
622 ncp->nc_error = 0;
623 } else {
624 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
625 ++numneg;
626 ncp->nc_error = ENOENT;
627 }
628 ncp->nc_flag &= ~NCF_UNRESOLVED;
629}
630
631void
632cache_setvp(struct nchandle *nch, struct vnode *vp)
633{
634 _cache_setvp(nch->ncp, vp);
635}
636
637void
638cache_settimeout(struct nchandle *nch, int nticks)
639{
640 struct namecache *ncp = nch->ncp;
641
642 if ((ncp->nc_timeout = ticks + nticks) == 0)
643 ncp->nc_timeout = 1;
644}
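/*
 * Illustrative sketch (not compiled; example_resolver is hypothetical):
 * how a filesystem resolver typically uses the two calls above.  A NULL
 * vp creates a negative entry; a timeout (e.g. for NFS-style attribute
 * caching) forces re-resolution once it expires.
 */
#if 0
static void
example_resolver(struct nchandle *nch, struct vnode *vp)
{
	/* nch->ncp must be locked and NCF_UNRESOLVED here */
	cache_setvp(nch, vp);		/* vp == NULL -> negative hit */
	cache_settimeout(nch, hz);	/* revalidate after ~1 second */
}
#endif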
645
646/*
647 * Disassociate the vnode or negative-cache association and mark a
648 * namecache entry as unresolved again. Note that the ncp is still
649 * left in the hash table and still linked to its parent.
650 *
651 * The ncp should be locked and refd on entry and will remain locked and refd
652 * on return.
653 *
654 * This routine is normally never called on a directory containing children.
655 * However, NFS often does just that in its rename() code as a cop-out to
656 * avoid complex namespace operations. This disconnects a directory vnode
657 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
658 * sync.
659 *
660 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
661 * in a create, properly propagates the flag up the chain.
662 */
663static
664void
665_cache_setunresolved(struct namecache *ncp)
666{
667 struct vnode *vp;
668
669 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
670 ncp->nc_flag |= NCF_UNRESOLVED;
671 ncp->nc_timeout = 0;
672 ncp->nc_error = ENOTCONN;
673 ++numunres;
674 if ((vp = ncp->nc_vp) != NULL) {
675 --numcache;
676 ncp->nc_vp = NULL;
677 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
678
679 /*
680 * Any vp associated with an ncp with children is
681 * held by that ncp. Any vp associated with a locked
682 * ncp is held by that ncp. These conditions must be
683 * undone when the vp is cleared out from the ncp.
684 */
685 if (ncp->nc_flag & NCF_FSMID)
686 vupdatefsmid(vp);
687 if (!TAILQ_EMPTY(&ncp->nc_list))
688 vdrop(vp);
689 if (ncp->nc_exlocks)
690 vdrop(vp);
691 } else {
692 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
693 --numneg;
694 }
695 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
696 NCF_FSMID);
697 }
698}
699
700void
701cache_setunresolved(struct nchandle *nch)
702{
703 _cache_setunresolved(nch->ncp);
704}
705
706/*
707 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
708 * looking for matches. This flag tells the lookup code when it must
709 * check for a mount linkage and also prevents the directories in question
710 * from being deleted or renamed.
711 */
712static
713int
714cache_clrmountpt_callback(struct mount *mp, void *data)
715{
716 struct nchandle *nch = data;
717
718 if (mp->mnt_ncmounton.ncp == nch->ncp)
719 return(1);
720 if (mp->mnt_ncmountpt.ncp == nch->ncp)
721 return(1);
722 return(0);
723}
724
725void
726cache_clrmountpt(struct nchandle *nch)
727{
728 int count;
729
730 count = mountlist_scan(cache_clrmountpt_callback, nch,
731 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
732 if (count == 0)
733 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
734}
735
736/*
737 * Invalidate portions of the namecache topology given a starting entry.
738 * The passed ncp must be locked.  It is set to an unresolved state,
739 * and in addition:
741 *
742 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
743 * that the physical underlying nodes have been
744 * destroyed... as in deleted. For example, when
745 * a directory is removed. This will cause record
746 * lookups on the name to no longer be able to find
747 * the record and tells the resolver to return failure
748 * rather than trying to resolve through the parent.
749 *
750 * The topology itself, including ncp->nc_name,
751 * remains intact.
752 *
753 * This only applies to the passed ncp, if CINV_CHILDREN
754 * is specified the children are not flagged.
755 *
756 * CINV_CHILDREN - Set all children (recursively) to an unresolved
757 * state as well.
758 *
759 * Note that this will also have the side effect of
760 * cleaning out any unreferenced nodes in the topology
761 * from the leaves up as the recursion backs out.
762 *
763 * Note that the topology for any referenced nodes remains intact.
764 *
765 * It is possible for cache_inval() to race a cache_resolve(), meaning that
766 * the namecache entry may not actually be invalidated on return if it was
767 * revalidated while recursing down into its children. This code guarantees
768 * that the node(s) will go through an invalidation cycle, but does not
769 * guarantee that they will remain in an invalidated state.
770 *
771 * Returns non-zero if a revalidation was detected during the invalidation
772 * recursion, zero otherwise. Note that since only the original ncp is
773 * locked the revalidation ultimately can only indicate that the original ncp
774 * *MIGHT NOT* have been re-resolved.
775 *
776 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
777 * have to avoid blowing out the kernel stack. We do this by saving the
778 * deep namecache node and aborting the recursion, then re-recursing at that
779 * node using a depth-first algorithm in order to allow multiple deep
780 * recursions to chain through each other, then we restart the invalidation
781 * from scratch.
782 */
783
784struct cinvtrack {
785 struct namecache *resume_ncp;
786 int depth;
787};
788
789static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
790
791static
792int
793_cache_inval(struct namecache *ncp, int flags)
794{
795 struct cinvtrack track;
796 struct namecache *ncp2;
797 int r;
798
799 track.depth = 0;
800 track.resume_ncp = NULL;
801
802 for (;;) {
803 r = _cache_inval_internal(ncp, flags, &track);
804 if (track.resume_ncp == NULL)
805 break;
806 kprintf("Warning: deep namecache recursion at %s\n",
807 ncp->nc_name);
808 _cache_unlock(ncp);
809 while ((ncp2 = track.resume_ncp) != NULL) {
810 track.resume_ncp = NULL;
811 _cache_lock(ncp2);
812 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
813 &track);
814 _cache_put(ncp2);
815 }
816 _cache_lock(ncp);
817 }
818 return(r);
819}
820
821int
822cache_inval(struct nchandle *nch, int flags)
823{
824 return(_cache_inval(nch->ncp, flags));
825}
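/*
 * Illustrative sketch (not compiled; example_rmdir_inval is hypothetical):
 * invalidating a directory that has been removed.  CINV_DESTROY marks the
 * node itself destroyed while CINV_CHILDREN recursively unresolves
 * anything cached underneath it.
 */
#if 0
static void
example_rmdir_inval(struct nchandle *nch)
{
	/* nch->ncp must be locked and referenced by the caller */
	cache_inval(nch, CINV_DESTROY | CINV_CHILDREN);
}
#endif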
826
827static int
828_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
829{
830 struct namecache *kid;
831 struct namecache *nextkid;
832 int rcnt = 0;
833
834 KKASSERT(ncp->nc_exlocks);
835
836 _cache_setunresolved(ncp);
837 if (flags & CINV_DESTROY)
838 ncp->nc_flag |= NCF_DESTROYED;
839
840 if ((flags & CINV_CHILDREN) &&
841 (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
842 ) {
843 if (++track->depth > MAX_RECURSION_DEPTH) {
844 track->resume_ncp = ncp;
845 _cache_hold(ncp);
846 ++rcnt;
847 }
848 _cache_hold(kid);
849 _cache_unlock(ncp);
850 while (kid) {
851 if (track->resume_ncp) {
852 _cache_drop(kid);
853 break;
854 }
855 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
856 _cache_hold(nextkid);
857 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
858 TAILQ_FIRST(&kid->nc_list)
859 ) {
860 _cache_lock(kid);
861 rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
862 _cache_unlock(kid);
863 }
864 _cache_drop(kid);
865 kid = nextkid;
866 }
867 --track->depth;
868 _cache_lock(ncp);
869 }
870
871 /*
872 * Someone could have gotten in there while ncp was unlocked,
873 * retry if so.
874 */
875 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
876 ++rcnt;
877 return (rcnt);
878}
879
880/*
881 * Invalidate a vnode's namecache associations. To avoid races against
882 * the resolver we do not invalidate a node which we previously invalidated
883 * but which was then re-resolved while we were in the invalidation loop.
884 *
885 * Returns non-zero if any namecache entries remain after the invalidation
886 * loop completed.
887 *
888 * NOTE: unlike the namecache topology which guarantees that ncp's will not
889 * be ripped out of the topology while held, the vnode's v_namecache list
890 * has no such restriction. NCP's can be ripped out of the list at virtually
891 * any time if not locked, even if held.
892 */
893int
894cache_inval_vp(struct vnode *vp, int flags)
895{
896 struct namecache *ncp;
897 struct namecache *next;
898
899restart:
900 ncp = TAILQ_FIRST(&vp->v_namecache);
901 if (ncp)
902 _cache_hold(ncp);
903 while (ncp) {
904 /* loop entered with ncp held */
905 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
906 _cache_hold(next);
907 _cache_lock(ncp);
908 if (ncp->nc_vp != vp) {
909 kprintf("Warning: cache_inval_vp: race-A detected on "
910 "%s\n", ncp->nc_name);
911 _cache_put(ncp);
912 if (next)
913 _cache_drop(next);
914 goto restart;
915 }
916 _cache_inval(ncp, flags);
917 _cache_put(ncp); /* also releases reference */
918 ncp = next;
919 if (ncp && ncp->nc_vp != vp) {
920 kprintf("Warning: cache_inval_vp: race-B detected on "
921 "%s\n", ncp->nc_name);
922 _cache_drop(ncp);
923 goto restart;
924 }
925 }
926 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
927}
928
929/*
930 * This routine is used instead of the normal cache_inval_vp() when we
931 * are trying to recycle otherwise good vnodes.
932 *
933 * Return 0 on success, non-zero if not all namecache records could be
934 * disassociated from the vnode (for various reasons).
935 */
936int
937cache_inval_vp_nonblock(struct vnode *vp)
938{
939 struct namecache *ncp;
940 struct namecache *next;
941
942 ncp = TAILQ_FIRST(&vp->v_namecache);
943 if (ncp)
944 _cache_hold(ncp);
945 while (ncp) {
946 /* loop entered with ncp held */
947 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
948 _cache_hold(next);
949 if (_cache_lock_nonblock(ncp)) {
950 _cache_drop(ncp);
951 if (next)
952 _cache_drop(next);
953 break;
954 }
955 if (ncp->nc_vp != vp) {
956 kprintf("Warning: cache_inval_vp: race-A detected on "
957 "%s\n", ncp->nc_name);
958 _cache_put(ncp);
959 if (next)
960 _cache_drop(next);
961 break;
962 }
963 _cache_inval(ncp, 0);
964 _cache_put(ncp); /* also releases reference */
965 ncp = next;
966 if (ncp && ncp->nc_vp != vp) {
967 kprintf("Warning: cache_inval_vp: race-B detected on "
968 "%s\n", ncp->nc_name);
969 _cache_drop(ncp);
970 break;
971 }
972 }
973 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
974}
975
976/*
977 * The source ncp has been renamed to the target ncp. Both fncp and tncp
978 * must be locked. Both will be set to unresolved, any children of tncp
979 * will be disconnected (the prior contents of the target is assumed to be
980 * destroyed by the rename operation, e.g. renaming over an empty directory),
981 * and all children of fncp will be moved to tncp.
982 *
983 * XXX the disconnection could pose a problem, check code paths to make
984 * sure any code that blocks can handle the parent being changed out from
985 * under it. Maybe we should lock the children (watch out for deadlocks) ?
986 *
987 * After we return the caller has the option of calling cache_setvp() if
988 * the vnode of the new target ncp is known.
989 *
990 * Any process CD'd into any of the children will no longer be able to ".."
991 * back out. An rm -rf can cause this situation to occur.
992 */
993void
994cache_rename(struct nchandle *fnch, struct nchandle *tnch)
995{
996 struct namecache *fncp = fnch->ncp;
997 struct namecache *tncp = tnch->ncp;
998 struct namecache *scan;
999 int didwarn = 0;
1000
1001 _cache_setunresolved(fncp);
1002 _cache_setunresolved(tncp);
1003 while (_cache_inval(tncp, CINV_CHILDREN) != 0) {
1004 if (didwarn++ % 10 == 0) {
1005 kprintf("Warning: cache_rename: race during "
1006 "rename %s->%s\n",
1007 fncp->nc_name, tncp->nc_name);
1008 }
1009 tsleep(tncp, 0, "mvrace", hz / 10);
1010 _cache_setunresolved(tncp);
1011 }
1012 while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
1013 _cache_hold(scan);
1014 cache_unlink_parent(scan);
1015 cache_link_parent(scan, tncp);
1016 if (scan->nc_flag & NCF_HASHED)
1017 _cache_rehash(scan);
1018 _cache_drop(scan);
1019 }
1020}
1021
1022/*
1023 * vget the vnode associated with the namecache entry. Resolve the namecache
1024 * entry if necessary and deal with namecache/vp races. The passed ncp must
1025 * be referenced and may be locked. The ncp's ref/locking state is not
1026 * affected by this call.
1027 *
1028 * lk_type may be LK_SHARED or LK_EXCLUSIVE. A ref'd, possibly locked
1029 * vnode (depending on the passed lk_type) will be returned in *vpp with an error
1030 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
1031 * most typical error is ENOENT, meaning that the ncp represents a negative
1032 * cache hit and there is no vnode to retrieve, but other errors can occur
1033 * too.
1034 *
1035 * The main race we have to deal with is the namecache zap. The ncp itself
1036 * will not disappear since it is referenced, and it turns out that the
1037 * validity of the vp pointer can be checked simply by rechecking the
1038 * contents of ncp->nc_vp.
1039 */
1040int
1041cache_vget(struct nchandle *nch, struct ucred *cred,
1042 int lk_type, struct vnode **vpp)
1043{
1044 struct namecache *ncp;
1045 struct vnode *vp;
1046 int error;
1047
1048 ncp = nch->ncp;
1049again:
1050 vp = NULL;
1051 if (ncp->nc_flag & NCF_UNRESOLVED) {
1052 _cache_lock(ncp);
1053 error = cache_resolve(nch, cred);
1054 _cache_unlock(ncp);
1055 } else {
1056 error = 0;
1057 }
1058 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1059 /*
1060 * Accessing the vnode from the namecache is a bit
1061 * dangerous. Because there are no refs on the vnode, it
1062 * could be in the middle of a reclaim.
1063 */
1064 if (vp->v_flag & VRECLAIMED) {
1065 kprintf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
1066 _cache_lock(ncp);
1067 _cache_setunresolved(ncp);
1068 _cache_unlock(ncp);
1069 goto again;
1070 }
1071 error = vget(vp, lk_type);
1072 if (error) {
1073 if (vp != ncp->nc_vp)
1074 goto again;
1075 vp = NULL;
1076 } else if (vp != ncp->nc_vp) {
1077 vput(vp);
1078 goto again;
1079 } else if (vp->v_flag & VRECLAIMED) {
1080 panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
1081 }
1082 }
1083 if (error == 0 && vp == NULL)
1084 error = ENOENT;
1085 *vpp = vp;
1086 return(error);
1087}
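/*
 * Illustrative sketch (not compiled; example_vget is hypothetical):
 * retrieving a locked vnode through the cache.  ENOENT indicates a
 * negative hit, not a failure of the cache machinery itself.
 */
#if 0
static int
example_vget(struct nchandle *nch, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	error = cache_vget(nch, cred, LK_EXCLUSIVE, &vp);
	if (error == 0) {
		/* ... use the locked, referenced vnode ... */
		vput(vp);
	}
	return (error);
}
#endif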
1088
1089int
1090cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
1091{
1092 struct namecache *ncp;
1093 struct vnode *vp;
1094 int error;
1095
1096 ncp = nch->ncp;
1097
1098again:
1099 vp = NULL;
1100 if (ncp->nc_flag & NCF_UNRESOLVED) {
1101 _cache_lock(ncp);
1102 error = cache_resolve(nch, cred);
1103 _cache_unlock(ncp);
1104 } else {
1105 error = 0;
1106 }
1107 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1108 /*
1109 * Since we did not obtain any locks, a cache zap
1110 * race can occur here if the vnode is in the middle
1111 * of being reclaimed and has not yet been able to
1112 * clean out its cache node. If that case occurs,
1113 * we must lock and unresolve the cache, then loop
1114 * to retry.
1115 */
1116 if ((error = vget(vp, LK_SHARED)) != 0) {
1117 if (error == ENOENT) {
1118 kprintf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
1119 _cache_lock(ncp);
1120 _cache_setunresolved(ncp);
1121 _cache_unlock(ncp);
1122 goto again;
1123 }
1124 /* fatal error */
1125 } else {
1126 /* caller does not want a lock */
1127 vn_unlock(vp);
1128 }
1129 }
1130 if (error == 0 && vp == NULL)
1131 error = ENOENT;
1132 *vpp = vp;
1133 return(error);
1134}
1135
1136/*
1137 * Recursively set the FSMID update flag for namecache nodes leading
1138 * to root. This will cause the next getattr or reclaim to increment the
1139 * fsmid and mark the inode for lazy updating.
1140 *
1141 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
1142 * This makes FSMIDs work in an Einsteinian fashion - where the observation
1143 * affects the result. In this case a program monitoring a higher level
1144 * node will have detected some prior change and started its scan (clearing
1145 * NCF_FSMID in higher level nodes), but since it has not yet observed the
1146 * node where we find NCF_FSMID still set, we can safely make the related
1147 * modification without interfering with the theorized program.
1148 *
1149 * This also means that FSMIDs cannot represent time-domain quantities
1150 * in a hierarchical sense. But the main reason for doing it this way
1151 * is to reduce the amount of recursion that occurs in the critical path
1152 * when e.g. a program is writing to a file that sits deep in a directory
1153 * hierarchy.
1154 */
1155void
1156cache_update_fsmid(struct nchandle *nch)
1157{
1158 struct namecache *ncp;
1159 struct namecache *scan;
1160 struct vnode *vp;
1161
1162 ncp = nch->ncp;
1163
1164 /*
1165 * Warning: even if we get a non-NULL vp it could still be in the
1166 * middle of being recycled. Don't do anything fancy, just set
1167 * NCF_FSMID.
1168 */
1169 if ((vp = ncp->nc_vp) != NULL) {
1170 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1171 for (scan = ncp; scan; scan = scan->nc_parent) {
1172 if (scan->nc_flag & NCF_FSMID)
1173 break;
1174 scan->nc_flag |= NCF_FSMID;
1175 }
1176 }
1177 } else {
1178 while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
1179 ncp->nc_flag |= NCF_FSMID;
1180 ncp = ncp->nc_parent;
1181 }
1182 }
1183}
1184
1185void
1186cache_update_fsmid_vp(struct vnode *vp)
1187{
1188 struct namecache *ncp;
1189 struct namecache *scan;
1190
1191 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1192 for (scan = ncp; scan; scan = scan->nc_parent) {
1193 if (scan->nc_flag & NCF_FSMID)
1194 break;
1195 scan->nc_flag |= NCF_FSMID;
1196 }
1197 }
1198}
1199
1200/*
1201 * If getattr is called on a vnode (e.g. a stat call), the filesystem
1202 * may call this routine to determine if the namecache has the hierarchical
1203 * change flag set, requiring the fsmid to be updated.
1204 *
1205 * Since 0 indicates no support, make sure the filesystem fsmid is at least
1206 * 1.
1207 */
1208int
1209cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
1210{
1211 struct namecache *ncp;
1212 int changed = 0;
1213
1214 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1215 if (ncp->nc_flag & NCF_FSMID) {
1216 ncp->nc_flag &= ~NCF_FSMID;
1217 changed = 1;
1218 }
1219 }
1220 if (*fsmid == 0)
1221 ++*fsmid;
1222 if (changed)
1223 ++*fsmid;
1224 return(changed);
1225}
1226
1227/*
1228 * Obtain the FSMID for a vnode for filesystems which do not support
1229 * a built-in FSMID.
1230 */
1231int64_t
1232cache_sync_fsmid_vp(struct vnode *vp)
1233{
1234 struct namecache *ncp;
1235
1236 if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
1237 if (ncp->nc_flag & NCF_FSMID) {
1238 ncp->nc_flag &= ~NCF_FSMID;
1239 ++ncp->nc_fsmid;
1240 }
1241 return(ncp->nc_fsmid);
1242 }
1243 return(VNOVAL);
1244}
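/*
 * Illustrative sketch (not compiled): how a filesystem lacking a native
 * FSMID might surface the synthesized one from its getattr path.  The
 * va_fsmid field is assumed here and example_getattr is hypothetical.
 */
#if 0
static void
example_getattr(struct vnode *vp, struct vattr *vap)
{
	/* cache_sync_fsmid_vp() returns VNOVAL when vp has no ncp */
	vap->va_fsmid = cache_sync_fsmid_vp(vp);
}
#endif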
1245
1246/*
1247 * Convert a directory vnode to a namecache record without any other
1248 * knowledge of the topology. This ONLY works with directory vnodes and
1249 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
1250 * returned ncp (if not NULL) will be held and unlocked.
1251 *
1252 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
1253 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
1254 * for dvp. This will fail only if the directory has been deleted out from
1255 * under the caller.
1256 *
1257 * Callers must always check for a NULL return no matter the value of 'makeit'.
1258 *
1259 * To avoid overflowing the kernel stack each recursive call increments
1260 * the makeit variable.
1261 */
1262
1263static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1264 struct vnode *dvp);
1265static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1266 struct vnode **saved_dvp);
1267
1268int
1269cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
1270 struct nchandle *nch)
1271{
1272 struct vnode *saved_dvp;
1273 struct vnode *pvp;
1274 int error;
1275
1276 nch->ncp = NULL;
1277 nch->mount = dvp->v_mount;
1278 saved_dvp = NULL;
1279
1280 /*
1281 * Temporary debugging code to force the directory scanning code
1282 * to be exercised.
1283 */
1284 if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
1285 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
1286 kprintf("cache_fromdvp: forcing %s\n", nch->ncp->nc_name);
1287 goto force;
1288 }
1289
1290 /*
1291 * Loop until resolution, inside code will break out on error.
1292 */
1293 while ((nch->ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
1294force:
1295 /*
1296 * If dvp is the root of its filesystem it should already
1297 * have a namecache pointer associated with it as a side
1298 * effect of the mount, but it may have been disassociated.
1299 */
1300 if (dvp->v_flag & VROOT) {
1301 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
1302 error = cache_resolve_mp(nch->mount);
1303 _cache_put(nch->ncp);
1304 if (ncvp_debug) {
1305 kprintf("cache_fromdvp: resolve root of mount %p error %d",
1306 dvp->v_mount, error);
1307 }
1308 if (error) {
1309 if (ncvp_debug)
1310 kprintf(" failed\n");
1311 nch->ncp = NULL;
1312 break;
1313 }
1314 if (ncvp_debug)
1315 kprintf(" succeeded\n");
1316 continue;
1317 }
1318
1319 /*
1320 * If we are recursed too deeply resort to an O(n^2)
1321 * algorithm to resolve the namecache topology. The
1322 * resolved pvp is left referenced in saved_dvp to
1323 * prevent the tree from being destroyed while we loop.
1324 */
1325 if (makeit > 20) {
1326 error = cache_fromdvp_try(dvp, cred, &saved_dvp);
1327 if (error) {
1328 kprintf("lookupdotdot(longpath) failed %d "
1329 "dvp %p\n", error, dvp);
1330 break;
1331 }
1332 continue;
1333 }
1334
1335 /*
1336 * Get the parent directory and resolve its ncp.
1337 */
1338 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
1339 if (error) {
1340 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
1341 break;
1342 }
1343 vn_unlock(pvp);
1344
1345 /*
1346 * Reuse makeit as a recursion depth counter.
1347 */
1348 cache_fromdvp(pvp, cred, makeit + 1, nch);
1349 vrele(pvp);
1350 if (nch->ncp == NULL)
1351 break;
1352
1353 /*
1354 * Do an inefficient scan of pvp (embodied by ncp) to look
1355 * for dvp. This will create a namecache record for dvp on
1356 * success. We loop up to recheck on success.
1357 *
1358 * ncp and dvp are both held but not locked.
1359 */
1360 error = cache_inefficient_scan(nch, cred, dvp);
1361 if (error) {
1362 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
1363 pvp, nch->ncp->nc_name, dvp);
1364 _cache_drop(nch->ncp);
1365 nch->ncp = NULL;
1366 break;
1367 }
1368 if (ncvp_debug)
1369 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
1370 pvp, nch->ncp->nc_name);
1371 _cache_drop(nch->ncp); /* drop only after the last nc_name use */
1372 }
1373
1374 /*
1375 * hold it for real so the mount gets a ref
1376 */
1377 if (nch->ncp)
1378 cache_hold(nch);
1379 if (saved_dvp)
1380 vrele(saved_dvp);
1381 if (nch->ncp)
1382 return (0);
1383 return (EINVAL);
1384}
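/*
 * Illustrative sketch (not compiled; example_fh_to_nch is hypothetical):
 * how the NFS server might map a directory vnode decoded from a file
 * handle back into the namecache topology.
 */
#if 0
static int
example_fh_to_nch(struct vnode *dvp, struct ucred *cred,
		  struct nchandle *nch)
{
	/* makeit=1: rebuild the missing topology, recursing upward */
	return (cache_fromdvp(dvp, cred, 1, nch));
}
#endif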
1385
1386/*
1387 * Go up the chain of parent directories until we find something
1388 * we can resolve into the namecache. This is very inefficient.
1389 */
1390static
1391int
1392cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1393 struct vnode **saved_dvp)
1394{
1395 struct nchandle nch;
1396 struct vnode *pvp;
1397 int error;
1398 static time_t last_fromdvp_report;
1399
1400 /*
1401 * Loop getting the parent directory vnode until we get something we
1402 * can resolve in the namecache.
1403 */
1404 vref(dvp);
1405 nch.mount = dvp->v_mount;
1406
1407 for (;;) {
1408 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
1409 if (error) {
1410 vrele(dvp);
1411 return (error);
1412 }
1413 vn_unlock(pvp);
1414 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
1415 _cache_hold(nch.ncp);
1416 vrele(pvp);
1417 break;
1418 }
1419 if (pvp->v_flag & VROOT) {
1420 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
1421 error = cache_resolve_mp(nch.mount);
1422 _cache_unlock(nch.ncp);
1423 vrele(pvp);
1424 if (error) {
1425 _cache_drop(nch.ncp);
1426 vrele(dvp);
1427 return (error);
1428 }
1429 break;
1430 }
1431 vrele(dvp);
1432 dvp = pvp;
1433 }
1434 if (last_fromdvp_report != time_second) {
1435 last_fromdvp_report = time_second;
1436 kprintf("Warning: extremely inefficient path resolution on %s\n",
1437 nch.ncp->nc_name);
1438 }
1439 error = cache_inefficient_scan(&nch, cred, dvp);
1440
1441 /*
1442 * Hopefully dvp now has a namecache record associated with it.
1443 * Leave it referenced to prevent the kernel from recycling the
1444 * vnode. Otherwise extremely long directory paths could result
1445 * in endless recycling.
1446 */
1447 if (*saved_dvp)
1448 vrele(*saved_dvp);
1449 *saved_dvp = dvp;
1450 return (error);
1451}
1452
1453
1454/*
1455 * Do an inefficient scan of the directory represented by ncp looking for
1456 * the directory vnode dvp. ncp must be held but not locked on entry and
1457 * will be held on return. dvp must be refd but not locked on entry and
1458 * will remain refd on return.
1459 *
1460 * Why do this at all? Well, due to its stateless nature the NFS server
1461 * converts file handles directly to vnodes without necessarily going through
1462 * the namecache ops that would otherwise create the namecache topology
1463 * leading to the vnode. We could either (1) Change the namecache algorithms
1464 * to allow disconnected namecache records that are re-merged opportunistically,
1465 * or (2) Make the NFS server backtrack and scan to recover a connected
1466 * namecache topology in order to then be able to issue new API lookups.
1467 *
1468 * It turns out that (1) is a huge mess. It takes a nice clean set of
1469 * namecache algorithms and introduces a lot of complication in every subsystem
1470 * that calls into the namecache to deal with the re-merge case, especially
1471 * since we are using the namecache to placehold negative lookups and the
1472 * vnode might not be immediately assigned. (2) is certainly far less
1473 * efficient than (1), but since we are only talking about directories here
1474 * (which are likely to remain cached), the case does not actually run all
1475 * that often and has the supreme advantage of not polluting the namecache
1476 * algorithms.
1477 */
1478static int
1479cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1480 struct vnode *dvp)
1481{
1482 struct nlcomponent nlc;
1483 struct nchandle rncp;
1484 struct dirent *den;
1485 struct vnode *pvp;
1486 struct vattr vat;
1487 struct iovec iov;
1488 struct uio uio;
1489 int blksize;
1490 int eofflag;
1491 int bytes;
1492 char *rbuf;
1493 int error;
1494
1495 vat.va_blocksize = 0;
1496 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
1497 return (error);
1498 if ((error = cache_vref(nch, cred, &pvp)) != 0)
1499 return (error);
1500 if (ncvp_debug)
1501 kprintf("inefficient_scan: directory iosize %ld vattr fileid = %lld\n", vat.va_blocksize, vat.va_fileid);
1502 if ((blksize = vat.va_blocksize) == 0)
1503 blksize = DEV_BSIZE;
1504 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
1505 rncp.ncp = NULL;
1506
1507 eofflag = 0;
1508 uio.uio_offset = 0;
1509again:
1510 iov.iov_base = rbuf;
1511 iov.iov_len = blksize;
1512 uio.uio_iov = &iov;
1513 uio.uio_iovcnt = 1;
1514 uio.uio_resid = blksize;
1515 uio.uio_segflg = UIO_SYSSPACE;
1516 uio.uio_rw = UIO_READ;
1517 uio.uio_td = curthread;
1518
1519 if (ncvp_debug >= 2)
1520 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
1521 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
1522 if (error == 0) {
1523 den = (struct dirent *)rbuf;
1524 bytes = blksize - uio.uio_resid;
1525
1526 while (bytes > 0) {
1527 if (ncvp_debug >= 2) {
1528 kprintf("cache_inefficient_scan: %*.*s\n",
1529 den->d_namlen, den->d_namlen,
1530 den->d_name);
1531 }
1532 if (den->d_type != DT_WHT &&
1533 den->d_ino == vat.va_fileid) {
1534 if (ncvp_debug) {
1535 kprintf("cache_inefficient_scan: "
1536 "MATCHED inode %lld path %s/%*.*s\n",
1537 vat.va_fileid, nch->ncp->nc_name,
1538 den->d_namlen, den->d_namlen,
1539 den->d_name);
1540 }
1541 nlc.nlc_nameptr = den->d_name;
1542 nlc.nlc_namelen = den->d_namlen;
1543 rncp = cache_nlookup(nch, &nlc);
1544 KKASSERT(rncp.ncp != NULL);
1545 break;
1546 }
1547 bytes -= _DIRENT_DIRSIZ(den);
1548 den = _DIRENT_NEXT(den);
1549 }
1550 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
1551 goto again;
1552 }
1553 vrele(pvp);
1554 if (rncp.ncp) {
1555 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
1556 _cache_setvp(rncp.ncp, dvp);
1557 if (ncvp_debug >= 2) {
1558 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
1559 nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
1560 }
1561 } else {
1562 if (ncvp_debug >= 2) {
1563 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
1564 nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
1565 rncp.ncp->nc_vp);
1566 }
1567 }
1568 if (rncp.ncp->nc_vp == NULL)
1569 error = rncp.ncp->nc_error;
1570 _cache_put(rncp.ncp);
1571 } else {
1572 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
1573 dvp, nch->ncp->nc_name);
1574 error = ENOENT;
1575 }
1576 kfree(rbuf, M_TEMP);
1577 return (error);
1578}
1579
1580/*
1581 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
1582 * state, which disassociates it from its vnode or ncneglist.
1583 *
1584 * Then, if there are no additional references to the ncp and no children,
1585 * the ncp is removed from the topology and destroyed. This function will
1586 * also run through the nc_parent chain and destroy parent ncps if possible.
1587 * As a side benefit, it turns out the only conditions that allow running
1588 * up the chain are also the conditions to ensure no deadlock will occur.
1589 *
1590 * References and/or children may exist if the ncp is in the middle of the
1591 * topology, preventing the ncp from being destroyed.
1592 *
1593 * This function must be called with the ncp held and locked and will unlock
1594 * and drop it during zapping.
1595 */
1596static void
1597cache_zap(struct namecache *ncp)
1598{
1599 struct namecache *par;
1600
1601 /*
1602 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
1603 */
1604 _cache_setunresolved(ncp);
1605
1606 /*
1607 * Try to scrap the entry and possibly tail-recurse on its parent.
1608 * We only scrap unref'd (other than our ref) unresolved entries,
1609 * we do not scrap 'live' entries.
1610 */
1611 while (ncp->nc_flag & NCF_UNRESOLVED) {
1612 /*
1613 * Someone other than us has a ref, stop.
1614 */
1615 if (ncp->nc_refs > 1)
1616 goto done;
1617
1618 /*
1619 * We have children, stop.
1620 */
1621 if (!TAILQ_EMPTY(&ncp->nc_list))
1622 goto done;
1623
1624 /*
1625 * Remove ncp from the topology: hash table and parent linkage.
1626 */
1627 if (ncp->nc_flag & NCF_HASHED) {
1628 ncp->nc_flag &= ~NCF_HASHED;
1629 LIST_REMOVE(ncp, nc_hash);
1630 }
1631 if ((par = ncp->nc_parent) != NULL) {
1632 par = _cache_hold(par);
1633 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
1634 ncp->nc_parent = NULL;
1635 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
1636 vdrop(par->nc_vp);
1637 }
1638
1639 /*
1640 * ncp should not have picked up any refs. Physically
1641 * destroy the ncp.
1642 */
1643 KKASSERT(ncp->nc_refs == 1);
1644 --numunres;
1645 /* _cache_unlock(ncp) not required */
1646 ncp->nc_refs = -1; /* safety */
1647 if (ncp->nc_name)
1648 kfree(ncp->nc_name, M_VFSCACHE);
1649 kfree(ncp, M_VFSCACHE);
1650
1651 /*
1652 * Loop on the parent (it may be NULL). Only bother looping
1653 * if the parent has a single ref (ours), which also means
1654 * we can lock it trivially.
1655 */
1656 ncp = par;
1657 if (ncp == NULL)
1658 return;
1659 if (ncp->nc_refs != 1) {
1660 _cache_drop(ncp);
1661 return;
1662 }
1663 KKASSERT(par->nc_exlocks == 0);
1664 _cache_lock(ncp);
1665 }
1666done:
1667 _cache_unlock(ncp);
1668 atomic_subtract_int(&ncp->nc_refs, 1);
1669}
1670
1671static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;
1672
1673static __inline
1674void
1675cache_hysteresis(void)
1676{
1677 /*
1678 * Don't cache too many negative hits. We use hysteresis to reduce
1679 * the impact on the critical path.
1680 */
1681 switch(cache_hysteresis_state) {
1682 case CHI_LOW:
1683 if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
1684 cache_cleanneg(10);
1685 cache_hysteresis_state = CHI_HIGH;
1686 }
1687 break;
1688 case CHI_HIGH:
1689 if (numneg > MINNEG * 9 / 10 &&
1690 numneg * ncnegfactor * 9 / 10 > numcache
1691 ) {
1692 cache_cleanneg(10);
1693 } else {
1694 cache_hysteresis_state = CHI_LOW;
1695 }
1696 break;
1697 }
1698}
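/*
 * Worked example of the thresholds above, assuming the defaults
 * MINNEG=1024 and ncnegfactor=16: cleaning starts (CHI_LOW -> CHI_HIGH)
 * once negative entries exceed both 1024 and 1/16th of the positive
 * entry count, and keeps running in CHI_HIGH until they fall below 90%
 * of those limits.  The 10% gap is what prevents the state from
 * flip-flopping on every lookup.
 */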
1699
1700/*
1701 * NEW NAMECACHE LOOKUP API
1702 *
1703 * Lookup an entry in the cache. A locked, referenced, non-NULL
1704 * entry is *always* returned, even if the supplied component is illegal.
1705 * The resulting namecache entry should be returned to the system with
1706 * cache_put() or cache_unlock() + cache_drop().
1707 *
1708 * namecache locks are recursive but care must be taken to avoid lock order
1709 * reversals.
1710 *
1711 * Nobody else will be able to manipulate the associated namespace (e.g.
1712 * create, delete, rename, rename-target) until the caller unlocks the
1713 * entry.
1714 *
1715 * The returned entry will be in one of three states: positive hit (non-null
1716 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
1717 * Unresolved entries must be resolved through the filesystem to associate the
1718 * vnode and/or determine whether a positive or negative hit has occurred.
1719 *
1720 * It is not necessary to lock a directory in order to lock namespace under
1721 * that directory. In fact, it is explicitly not allowed to do that. A
1722 * directory is typically only locked when being created, renamed, or
1723 * destroyed.
1724 *
1725 * The directory (par) may be unresolved, in which case any returned child
1726 * will likely also be marked unresolved. Likely but not guaranteed. Since
1727 * the filesystem lookup requires a resolved directory vnode the caller is
1728 * responsible for resolving the namecache chain top-down. This API
1729 * specifically allows whole chains to be created in an unresolved state.
1730 */
1731struct nchandle
1732cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
1733{
1734 struct nchandle nch;
1735 struct namecache *ncp;
1736 struct namecache *new_ncp;
1737 struct nchashhead *nchpp;
1738 u_int32_t hash;
1739 globaldata_t gd;
1740
1741 numcalls++;
1742 gd = mycpu;
1743
1744 /*
1745 * Try to locate an existing entry
1746 */
1747 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
1748 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
1749 new_ncp = NULL;
1750restart:
1751 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1752 numchecks++;
1753
1754 /*
1755 * Try to zap entries that have timed out. We have
1756 * to be careful here because locked leaves may depend
1757 * on the vnode remaining intact in a parent, so only
1758 * do this under very specific conditions.
1759 */
1760 if (ncp->nc_timeout &&
1761 (int)(ncp->nc_timeout - ticks) < 0 &&
1762 (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
1763 ncp->nc_exlocks == 0 &&
1764 TAILQ_EMPTY(&ncp->nc_list)
1765 ) {
1766 cache_zap(_cache_get(ncp));
1767 goto restart;
1768 }
1769
1770 /*
1771 * Break out if we find a matching entry. Note that
1772 * UNRESOLVED entries may match, but DESTROYED entries
1773 * do not.
1774 */
1775 if (ncp->nc_parent == par_nch->ncp &&
1776 ncp->nc_nlen == nlc->nlc_namelen &&
1777 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
1778 (ncp->nc_flag & NCF_DESTROYED) == 0
1779 ) {
1780 if (_cache_get_nonblock(ncp) == 0) {
1781 if (new_ncp)
1782 _cache_free(new_ncp);
1783 goto found;
1784 }
1785 _cache_get(ncp);
1786 _cache_put(ncp);
1787 goto restart;
1788 }
1789 }
1790
1791 /*
1792 * We failed to locate an entry, create a new entry and add it to
1793 * the cache. We have to relookup after possibly blocking in
1794 * kmalloc.
1795 */
1796 if (new_ncp == NULL) {
1797 new_ncp = cache_alloc(nlc->nlc_namelen);
1798 goto restart;
1799 }
1800
1801 ncp = new_ncp;
1802
1803 /*
1804 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
1805 * and link to the parent. The mount point is usually inherited
1806 * from the parent unless this is a special case such as a mount
1807 * point where nlc_namelen is 0. If nlc_namelen is 0 nc_name will
1808 * be NULL.
1809 */
1810 if (nlc->nlc_namelen) {
1811 bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
1812 ncp->nc_name[nlc->nlc_namelen] = 0;
1813 }
1814 nchpp = NCHHASH(hash);
1815 LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
1816 ncp->nc_flag |= NCF_HASHED;
1817 cache_link_parent(ncp, par_nch->ncp);
1818found:
1819 /*
1820 * stats and namecache size management
1821 */
1822 if (ncp->nc_flag & NCF_UNRESOLVED)
1823 ++gd->gd_nchstats->ncs_miss;
1824 else if (ncp->nc_vp)
1825 ++gd->gd_nchstats->ncs_goodhits;
1826 else
1827 ++gd->gd_nchstats->ncs_neghits;
1828 cache_hysteresis();
1829 nch.mount = par_nch->mount;
1830 nch.ncp = ncp;
1831 ++nch.mount->mnt_refs;
1832 return(nch);
1833}
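/*
 * Illustrative sketch (not compiled; example_lookup_one is hypothetical):
 * a caller resolving a single path component.  The returned handle is
 * always locked and referenced; an unresolved result must still go
 * through cache_resolve() before the vnode can be trusted.  A negative
 * hit resolves with ENOENT.
 */
#if 0
static int
example_lookup_one(struct nchandle *dir, struct ucred *cred,
		   char *name, int namelen)
{
	struct nlcomponent nlc;
	struct nchandle nch;
	int error;

	nlc.nlc_nameptr = name;
	nlc.nlc_namelen = namelen;
	nch = cache_nlookup(dir, &nlc);		/* locked + referenced */
	error = cache_resolve(&nch, cred);
	cache_put(&nch);			/* unlock + drop when done */
	return (error);
}
#endif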

/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
        struct mount *result;
        struct mount *nch_mount;
        struct namecache *nch_ncp;
};

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
        struct findmount_info *info = data;

        /*
         * Check the mount's mounted-on point against the passed nch.
         */
        if (mp->mnt_ncmounton.mount == info->nch_mount &&
            mp->mnt_ncmounton.ncp == info->nch_ncp
        ) {
                info->result = mp;
                return(-1);
        }
        return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
        struct findmount_info info;

        info.result = NULL;
        info.nch_mount = nch->mount;
        info.nch_ncp = nch->ncp;
        mountlist_scan(cache_findmount_callback, &info,
                       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
        return(info.result);
}
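
/*
 * Illustrative sketch (editor's addition): a downward path traversal
 * that reaches a covered directory can hop into the mounted filesystem
 * roughly as follows; the real traversal code must also transfer the
 * ref/lock state, which is omitted here:
 *
 *      if ((mp = cache_findmount(&nch)) != NULL)
 *              nch = mp->mnt_ncmountpt;
 *
 * This is simply the inverse of the mnt_ncmounton check performed by
 * cache_findmount_callback() above.
 */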

/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode. If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of being recycled when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid. cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
        struct namecache *par;
        struct namecache *ncp;
        struct nchandle nctmp;
        struct mount *mp;
        struct vnode *dvp;
        int error;

        ncp = nch->ncp;
        mp = nch->mount;
restart:
        /*
         * If the ncp is already resolved we have nothing to do. However,
         * we do want to guarantee that a usable vnode is returned when
         * a vnode is present, so make sure it hasn't been reclaimed.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        _cache_setunresolved(ncp);
                if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
                        return (ncp->nc_error);
        }

        /*
         * Mount points need special handling because the parent does not
         * belong to the same filesystem as the ncp.
         */
        if (ncp == mp->mnt_ncmountpt.ncp)
                return (cache_resolve_mp(mp));

        /*
         * We expect an unbroken chain of ncps to at least the mount point,
         * and even all the way to root (but this code doesn't have to go
         * past the mount point).
         */
        if (ncp->nc_parent == NULL) {
                kprintf("EXDEV case 1 %p %*.*s\n", ncp,
                        ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
                ncp->nc_error = EXDEV;
                return(ncp->nc_error);
        }

        /*
         * The vp's of the parent directories in the chain are held via vhold()
         * due to the existence of the child, and should not disappear.
         * However, there are cases where they can disappear:
         *
         *      - due to filesystem I/O errors.
         *      - due to NFS being stupid about tracking the namespace and
         *        destroying the namespace for entire directories quite often.
         *      - due to forced unmounts.
         *      - due to an rmdir (parent will be marked DESTROYED)
         *
         * When this occurs we have to track the chain backwards and resolve
         * it, looping until the resolver catches up to the current node. We
         * could recurse here but we might run ourselves out of kernel stack
         * so we do it in a more painful manner. This situation really should
         * not occur all that often, and when it does it should not have to
         * go back very many nodes to resolve the ncp.
         */
        while (ncp->nc_parent->nc_vp == NULL) {
                /*
                 * This case can occur if a process is CD'd into a
                 * directory which is then rmdir'd. If the parent is marked
                 * destroyed there is no point trying to resolve it.
                 */
                if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
                        return(ENOENT);

                par = ncp->nc_parent;
                while (par->nc_parent && par->nc_parent->nc_vp == NULL)
                        par = par->nc_parent;
                if (par->nc_parent == NULL) {
                        kprintf("EXDEV case 2 %*.*s\n",
                                par->nc_nlen, par->nc_nlen, par->nc_name);
                        return (EXDEV);
                }
                kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
                        par->nc_nlen, par->nc_nlen, par->nc_name);
                /*
                 * The parent is not set in stone, ref and lock it to prevent
                 * it from disappearing. Also note that due to renames it
                 * is possible for our ncp to move and for par to no longer
                 * be one of its parents. We resolve it anyway, the loop
                 * will handle any moves.
                 */
                _cache_get(par);
                if (par == nch->mount->mnt_ncmountpt.ncp) {
                        cache_resolve_mp(nch->mount);
                } else if ((dvp = par->nc_parent->nc_vp) == NULL) {
                        kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
                                par->nc_nlen, par->nc_nlen, par->nc_name);
                        _cache_put(par);
                        continue;
                } else if (par->nc_flag & NCF_UNRESOLVED) {
                        /* vhold(dvp); - DVP can't go away */
                        nctmp.mount = mp;
                        nctmp.ncp = par;
                        par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
                        /* vdrop(dvp); */
                }
                if ((error = par->nc_error) != 0) {
                        if (par->nc_error != EAGAIN) {
                                kprintf("EXDEV case 3 %*.*s error %d\n",
                                        par->nc_nlen, par->nc_nlen,
                                        par->nc_name, par->nc_error);
                                _cache_put(par);
                                return(error);
                        }
                        kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
                                par, par->nc_nlen, par->nc_nlen, par->nc_name);
                }
                _cache_put(par);
                /* loop */
        }

        /*
         * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
         * ncp's and reattach them. If this occurs the original ncp is marked
         * EAGAIN to force a relookup.
         *
         * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
         * ncp must already be resolved.
         */
        dvp = ncp->nc_parent->nc_vp;
        /* vhold(dvp); - dvp can't go away */
        nctmp.mount = mp;
        nctmp.ncp = ncp;
        ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
        /* vdrop(dvp); */
        if (ncp->nc_error == EAGAIN) {
                kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
                        ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
                goto restart;
        }
        return(ncp->nc_error);
}
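
/*
 * Illustrative sketch (editor's addition): per the header comment a
 * caller must treat ENOENT as a successful negative resolution rather
 * than a hard failure:
 *
 *      error = cache_resolve(&nch, cred);
 *      if (error == 0)
 *              vp = nch.ncp->nc_vp;    (resolved, vnode valid)
 *      else if (error == ENOENT)
 *              vp = NULL;              (resolved, negative hit)
 *      else
 *              return (error);         (real resolver error)
 */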

/*
 * Resolve the ncp associated with a mount point. Such ncp's almost always
 * remain resolved and this routine is rarely called. NFS MPs tend to force
 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics of this call are that the passed ncp must be locked on
 * entry and will be locked on return. However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
        struct namecache *ncp = mp->mnt_ncmountpt.ncp;
        struct vnode *vp;
        int error;

        KKASSERT(mp != NULL);

        /*
         * If the ncp is already resolved we have nothing to do. However,
         * we do want to guarantee that a usable vnode is returned when
         * a vnode is present, so make sure it hasn't been reclaimed.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        _cache_setunresolved(ncp);
        }

        if (ncp->nc_flag & NCF_UNRESOLVED) {
                _cache_unlock(ncp);
                while (vfs_busy(mp, 0))
                        ;
                error = VFS_ROOT(mp, &vp);
                _cache_lock(ncp);

                /*
                 * recheck the ncp state after relocking.
                 */
                if (ncp->nc_flag & NCF_UNRESOLVED) {
                        ncp->nc_error = error;
                        if (error == 0) {
                                _cache_setvp(ncp, vp);
                                vput(vp);
                        } else {
                                kprintf("[diagnostic] cache_resolve_mp: "
                                        "failed to resolve mount %p\n", mp);
                                _cache_setvp(ncp, NULL);
                        }
                } else if (error == 0) {
                        vput(vp);
                }
                vfs_unbusy(mp);
        }
        return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
        struct namecache *ncp;

        /*
         * Automode from the vnlru proc - clean out 10% of the negative cache
         * entries.
         */
        if (count == 0)
                count = numneg / 10 + 1;

        /*
         * Attempt to clean out the specified number of negative cache
         * entries.
         */
        while (count) {
                ncp = TAILQ_FIRST(&ncneglist);
                if (ncp == NULL) {
                        KKASSERT(numneg == 0);
                        break;
                }
                TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                if (_cache_get_nonblock(ncp) == 0)
                        cache_zap(ncp);
                --count;
        }
}
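
/*
 * Illustrative note (editor's addition): the vnlru path passes 0 to
 * select the 10% automode, while callers with a specific target pass
 * an explicit count:
 *
 *      cache_cleanneg(0);      (reclaim ~10% of the negative entries)
 *      cache_cleanneg(32);     (attempt to reclaim up to 32 entries)
 */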

/*
 * Rehash an ncp. Rehashing is typically required if the name changes
 * (should not generally occur) or the parent link changes. This function
 * will unhash the ncp if the ncp is no longer hashable.
 */
static void
_cache_rehash(struct namecache *ncp)
{
        struct nchashhead *nchpp;
        u_int32_t hash;

        if (ncp->nc_flag & NCF_HASHED) {
                ncp->nc_flag &= ~NCF_HASHED;
                LIST_REMOVE(ncp, nc_hash);
        }
        if (ncp->nc_nlen && ncp->nc_parent) {
                hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
                hash = fnv_32_buf(&ncp->nc_parent,
                                  sizeof(ncp->nc_parent), hash);
                nchpp = NCHHASH(hash);
                LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
                ncp->nc_flag |= NCF_HASHED;
        }
}
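
/*
 * Editorial note: the hash computed by _cache_rehash() must remain
 * identical to the one computed by cache_nlookup() (FNV-32 over the
 * name, then over the parent ncp pointer), otherwise a rehashed entry
 * would land in the wrong chain and become invisible to lookups.
 */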

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
        int i;
        globaldata_t gd;

        /* initialize per-cpu namecache effectiveness statistics. */
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                gd->gd_nchstats = &nchstats[i];
        }
        TAILQ_INIT(&ncneglist);
        nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
        nclockwarn = 1 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem. Fills in
 * *nch with a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
        nch->ncp = cache_alloc(0);
        nch->mount = mp;
        ++mp->mnt_refs;
        if (vp)
                _cache_setvp(nch->ncp, vp);
}
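
/*
 * Illustrative sketch (editor's addition, hypothetical call site): the
 * boot path might associate the root mount like this, leaving rootnch
 * referenced but unlocked as described above:
 *
 *      cache_allocroot(&rootnch, mp, rootvp);
 */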

/*
 * vfs_cache_setroot()
 *
 *      Create an association between the root of our namecache and
 *      the root vnode. This routine may be called several times during
 *      booting.
 *
 *      The vnode and nchandle references passed in are consumed by this
 *      routine (the prior root's references are released), so a caller
 *      that intends to keep using them must hold extra references first.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
        struct vnode *ovp;
        struct nchandle onch;

        ovp = rootvnode;
        onch = rootnch;
        rootvnode = nvp;
        if (nch)
                rootnch = *nch;
        else
                cache_zero(&rootnch);
        if (ovp)
                vrele(ovp);
        if (onch.ncp)
                cache_drop(&onch);
}
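
/*
 * Illustrative sketch (editor's addition): because the passed references
 * are consumed, a caller that intends to keep using its vnode and
 * nchandle takes extra references before donating them:
 *
 *      vref(vp);
 *      cache_hold(&nch);
 *      vfs_cache_setroot(vp, &nch);
 */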

/*
 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
 * topology and is being removed as quickly as possible. The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache. This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children. The namecache topology is left intact even if we do not
 * know what the vnode association is. Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
        cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0

void
cache_purgevfs(struct mount *mp)
{
        struct nchashhead *nchpp;
        struct namecache *ncp, *nnp;

        /*
         * Scan hash tables for applicable entries.
         */
        for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
                ncp = LIST_FIRST(nchpp);
                if (ncp)
                        _cache_hold(ncp);
                while (ncp) {
                        nnp = LIST_NEXT(ncp, nc_hash);
                        if (nnp)
                                _cache_hold(nnp);
                        if (ncp->nc_mount == mp) {
                                _cache_lock(ncp);
                                cache_zap(ncp);
                        } else {
                                _cache_drop(ncp);
                        }
                        ncp = nnp;
                }
        }
}

#endif

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
        static int fsmid_roller;
        int64_t fsmid;

        ++fsmid_roller;
        fsmid = ((int64_t)time_second << 32) |
                (fsmid_roller & 0x7FFFFFFF);
        return (fsmid);
}
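
/*
 * Illustrative sketch (editor's addition): the layout above packs the
 * seconds timestamp in the high 32 bits and a 31 bit serial number in
 * the low bits, so the components can be recovered with:
 *
 *      time_t stamp = (time_t)(fsmid >> 32);
 *      int serial = (int)(fsmid & 0x7FFFFFFF);
 */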


static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
        int buflen;
        int error;
        char *buf;
        char *bp;

        if (disablecwd)
                return (ENODEV);

        buflen = uap->buflen;
        if (buflen < 2)
                return (EINVAL);
        if (buflen > MAXPATHLEN)
                buflen = MAXPATHLEN;

        buf = kmalloc(buflen, M_TEMP, M_WAITOK);
        bp = kern_getcwd(buf, buflen, &error);
        if (error == 0)
                error = copyout(bp, uap->buf, strlen(bp) + 1);
        kfree(buf, M_TEMP);
        return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
        struct proc *p = curproc;
        char *bp;
        int i, slash_prefixed;
        struct filedesc *fdp;
        struct nchandle nch;

        numcwdcalls++;
        bp = buf;
        bp += buflen - 1;
        *bp = '\0';
        fdp = p->p_fd;
        slash_prefixed = 0;

        nch = fdp->fd_ncdir;
        while (nch.ncp && (nch.ncp != fdp->fd_nrdir.ncp ||
               nch.mount != fdp->fd_nrdir.mount)
        ) {
                /*
                 * While traversing upwards, if we encounter the root
                 * of the current mount we have to skip to the mount point
                 * in the underlying filesystem.
                 */
                if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
                        nch = nch.mount->mnt_ncmounton;
                        continue;
                }

                /*
                 * Prepend the path segment
                 */
                for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numcwdfail4++;
                                *error = ENOMEM;
                                return(NULL);
                        }
                        *--bp = nch.ncp->nc_name[i];
                }
                if (bp == buf) {
                        numcwdfail4++;
                        *error = ENOMEM;
                        return(NULL);
                }
                *--bp = '/';
                slash_prefixed = 1;

                /*
                 * Go up a directory. This isn't a mount point so we don't
                 * have to check again.
                 */
                nch.ncp = nch.ncp->nc_parent;
        }
        if (nch.ncp == NULL) {
                numcwdfail2++;
                *error = ENOENT;
                return(NULL);
        }
        if (!slash_prefixed) {
                if (bp == buf) {
                        numcwdfail4++;
                        *error = ENOMEM;
                        return(NULL);
                }
                *--bp = '/';
        }
        numcwdfound++;
        *error = 0;
        return (bp);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)                                                  \
        static u_int name;                                              \
        SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
           &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct nchandle *nchp, char **retbuf, char **freebuf)
{
        char *bp, *buf;
        int i, slash_prefixed;
        struct nchandle fd_nrdir;
        struct nchandle nch;

        numfullpathcalls--;

        *retbuf = NULL;
        *freebuf = NULL;

        buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        bp = buf + MAXPATHLEN - 1;
        *bp = '\0';
        if (p != NULL)
                fd_nrdir = p->p_fd->fd_nrdir;
        else
                fd_nrdir = rootnch;
        slash_prefixed = 0;
        nch = *nchp;

        while (nch.ncp &&
               (nch.ncp != fd_nrdir.ncp || nch.mount != fd_nrdir.mount)
        ) {
                /*
                 * While traversing upwards, if we encounter the root
                 * of the current mount we have to skip to the mount point.
                 */
                if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
                        nch = nch.mount->mnt_ncmounton;
                        continue;
                }

                /*
                 * Prepend the path segment
                 */
                for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numfullpathfail4++;
                                kfree(buf, M_TEMP);
                                return(ENOMEM);
                        }
                        *--bp = nch.ncp->nc_name[i];
                }
                if (bp == buf) {
                        numfullpathfail4++;
                        kfree(buf, M_TEMP);
                        return(ENOMEM);
                }
                *--bp = '/';
                slash_prefixed = 1;

                /*
                 * Go up a directory. This isn't a mount point so we don't
                 * have to check again.
                 */
                nch.ncp = nch.ncp->nc_parent;
        }
        if (nch.ncp == NULL) {
                numfullpathfail2++;
                kfree(buf, M_TEMP);
                return(ENOENT);
        }

        if (!slash_prefixed) {
                if (bp == buf) {
                        numfullpathfail4++;
                        kfree(buf, M_TEMP);
                        return(ENOMEM);
                }
                *--bp = '/';
        }
        numfullpathfound++;
        *retbuf = bp;
        *freebuf = buf;

        return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
        struct namecache *ncp;
        struct nchandle nch;

        numfullpathcalls++;
        if (disablefullpath)
                return (ENODEV);

        if (p == NULL)
                return (EINVAL);

        /* If vn is NULL the client wants us to use p->p_textvp */
        if (vn == NULL) {
                if ((vn = p->p_textvp) == NULL)
                        return (EINVAL);
        }
        TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
                if (ncp->nc_nlen)
                        break;
        }
        if (ncp == NULL)
                return (EINVAL);

        numfullpathcalls--;
        nch.ncp = ncp;
        nch.mount = vn->v_mount;
        return(cache_fullpath(p, &nch, retbuf, freebuf));
}
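
/*
 * Illustrative sketch (editor's addition): callers of vn_fullpath() and
 * cache_fullpath() own the returned buffer and must release it via the
 * freebuf pointer, matching the M_TEMP allocation made in
 * cache_fullpath():
 *
 *      char *retbuf, *freebuf;
 *
 *      if (vn_fullpath(p, vp, &retbuf, &freebuf) == 0) {
 *              kprintf("path: %s\n", retbuf);
 *              kfree(freebuf, M_TEMP);
 *      }
 */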