/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/vfs_cache.c 289798 2015-10-23 07:40:43Z avg $");

#include "opt_kdtrace.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
    "struct vnode *", "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
    "struct vnode *", "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *");

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name + nul */
};

/*
 * struct namecache_ts repeats the struct namecache layout up to the
 * nc_nlen member.
 * struct namecache_ts is used in place of struct namecache when
 * timestamps need to be stored.  The nc_dotdottime field is used when
 * a cache entry maps both a non-dotdot directory name and dotdot for
 * the directory's parent.
 */
struct	namecache_ts {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	struct	timespec nc_time;	/* timespec provided by fs */
	struct	timespec nc_dotdottime;	/* dotdot timespec provided by fs */
	int	nc_ticks;		/* ticks value when entry was added */
	char	nc_name[0];		/* segment name + nul */
};

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	0x01
#define NCF_ISDOTDOT	0x02
#define	NCF_TS		0x04
#define	NCF_DTS		0x08

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry (i.e., for a name that is known NOT to
 * exist), the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
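
/*
 * A rough sketch of the consumer side (see vfs_cache_lookup() below for
 * the canonical pattern): a filesystem's lookup VOP first calls
 * cache_lookup(); -1 means a positive hit and *vpp is valid, ENOENT
 * means a cached negative entry, and 0 means a miss, which falls
 * through to a real directory scan whose result is typically recorded
 * with cache_enter().
 */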

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* LRU list of negative entries */
static u_long	nchash;			/* hash table mask (size - 1) */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative entries in namecache");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
    "Number of namecache entries with vnodes held");
static u_int	ncsizefactor = 2;
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");

struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct rwlock cache_lock;
RW_SYSINIT(vfscache, &cache_lock, "Name Cache");

#define	CACHE_UPGRADE_LOCK()	rw_try_upgrade(&cache_lock)
#define	CACHE_RLOCK()		rw_rlock(&cache_lock)
#define	CACHE_RUNLOCK()		rw_runlock(&cache_lock)
#define	CACHE_WLOCK()		rw_wlock(&cache_lock)
#define	CACHE_WUNLOCK()		rw_wunlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_small_ts;
static uma_zone_t cache_zone_large;
static uma_zone_t cache_zone_large_ts;

#define	CACHE_PATH_CUTOFF	35

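/*
 * Allocate a namecache entry from the UMA zone matching the name length
 * and whether timestamps are to be stored with the entry.
 */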
static struct namecache *
cache_alloc(int len, int ts)
{

	if (len > CACHE_PATH_CUTOFF) {
		if (ts)
			return (uma_zalloc(cache_zone_large_ts, M_WAITOK));
		else
			return (uma_zalloc(cache_zone_large, M_WAITOK));
	}
	if (ts)
		return (uma_zalloc(cache_zone_small_ts, M_WAITOK));
	else
		return (uma_zalloc(cache_zone_small, M_WAITOK));
}

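/*
 * Release a namecache entry back to the zone it came from, as selected
 * by its name length and NCF_TS flag.
 */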
static void
cache_free(struct namecache *ncp)
{
	int ts;

	if (ncp == NULL)
		return;
	ts = ncp->nc_flag & NCF_TS;
	if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) {
		if (ts)
			uma_zfree(cache_zone_small_ts, ncp);
		else
			uma_zfree(cache_zone_small, ncp);
	} else if (ts)
		uma_zfree(cache_zone_large_ts, ncp);
	else
		uma_zfree(cache_zone_large, ncp);
}

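/*
 * Return a pointer to the entry's name.  The name lives at a different
 * offset in struct namecache and struct namecache_ts, so the NCF_TS
 * flag decides which layout applies.
 */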
static char *
nc_get_name(struct namecache *ncp)
{
	struct namecache_ts *ncp_ts;

	if ((ncp->nc_flag & NCF_TS) == 0)
		return (ncp->nc_name);
	ncp_ts = (struct namecache_ts *)ncp;
	return (ncp_ts->nc_name);
}

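/*
 * Copy the cached timestamp and ticks values out to the caller, when
 * requested.  Only valid for entries carrying NCF_TS.
 */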
static void
cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
{

	KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
	    (tsp == NULL && ticksp == NULL),
	    ("No NCF_TS"));

	if (tsp != NULL)
		*tsp = ((struct namecache_ts *)ncp)->nc_time;
	if (ticksp != NULL)
		*ticksp = ((struct namecache_ts *)ncp)->nc_ticks;
}

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
    "VFS namecache enabled");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
    sizeof(struct namecache), "sizeof(struct namecache)");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
    "Name cache statistics");
#define STATNODE(mode, name, var, descr) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr);
STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries");
STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries");
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls,
    "Number of cache lookups");
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits,
    "Number of '.' hits");
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits,
    "Number of '..' hits");
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks,
    "Number of checks in lookup");
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss,
    "Number of cache misses");
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap,
    "Number of cache misses we do not want to cache");
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps,
    "Number of cache hits (positive) we do not want to cache");
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits,
    "Number of cache hits (positive)");
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps,
    "Number of cache hits (negative) we do not want to cache");
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits,
    "Number of cache hits (negative)");
static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades,
    "Number of updates of the cache after lookup (write lock + retry)");

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &nchstats, sizeof(nchstats), "LU",
    "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
    u_int *buflen);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
    "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int i, error, n_nchash, *cntbuf;

retry:
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
	CACHE_RLOCK();
	if (n_nchash != nchash + 1) {
		CACHE_RUNLOCK();
		free(cntbuf, M_TEMP);
		goto retry;
	}
	/* Scan hash tables counting entries */
	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
		LIST_FOREACH(ncp, ncpp, nc_hash)
			cntbuf[i]++;
	CACHE_RUNLOCK();
	for (error = 0, i = 0; i < n_nchash; i++)
		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
			break;
	free(cntbuf, M_TEMP);
	return (error);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		CACHE_RLOCK();
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		CACHE_RUNLOCK();
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	pct = (used * 100) / (n_nchash / 100);
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
#endif

/*
 * cache_zap():
 *
 *   Removes a namecache entry from the cache, whether it contains an
 *   actual pointer to a vnode or is just a negative cache entry.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	struct vnode *vp;

	rw_assert(&cache_lock, RA_WLOCKED);
	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
#ifdef KDTRACE_HOOKS
	if (ncp->nc_vp != NULL) {
		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
		    nc_get_name(ncp), ncp->nc_vp);
	} else {
		SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    nc_get_name(ncp));
	}
#endif
	vp = NULL;
	LIST_REMOVE(ncp, nc_hash);
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		if (ncp == ncp->nc_dvp->v_cache_dd)
			ncp->nc_dvp->v_cache_dd = NULL;
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			vp = ncp->nc_dvp;
			numcachehv--;
		}
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd)
			ncp->nc_vp->v_cache_dd = NULL;
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	cache_free(ncp);
	if (vp)
		vdrop(vp);
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.  If the directory vnode is
 * recycled out from under us due to a forced unmount, a status of
 * ENOENT is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up ".", an extra ref is taken, but the lock
 * is not recursively acquired.
 */

int
cache_lookup(dvp, vpp, cnp, tsp, ticksp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
	struct timespec *tsp;
	int *ticksp;
{
	struct namecache *ncp;
	uint32_t hash;
	int error, ltype, wlocked;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
retry:
	CACHE_RLOCK();
	wlocked = 0;
	numcalls++;
	error = 0;

retry_wlocked:
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
			    dvp, cnp->cn_nameptr);
			dothits++;
			SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
			if (tsp != NULL)
				timespecclear(tsp);
			if (ticksp != NULL)
				*ticksp = ticks;
			goto success;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_cache_dd == NULL) {
				SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
				    "..", NULL);
				goto unlock;
			}
			if ((cnp->cn_flags & MAKEENTRY) == 0) {
				if (!wlocked && !CACHE_UPGRADE_LOCK())
					goto wlock;
				if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
					cache_zap(dvp->v_cache_dd);
				dvp->v_cache_dd = NULL;
				CACHE_WUNLOCK();
				return (0);
			}
			ncp = dvp->v_cache_dd;
			if (ncp->nc_flag & NCF_ISDOTDOT)
				*vpp = ncp->nc_vp;
			else
				*vpp = ncp->nc_dvp;
			/* Return failure if negative entry was found. */
			if (*vpp == NULL)
				goto negative_success;
			CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
			    dvp, cnp->cn_nameptr, *vpp);
			SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
			    *vpp);
			cache_out_ts(ncp, tsp, ticksp);
			if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
			    NCF_DTS && tsp != NULL)
				*tsp = ((struct namecache_ts *)ncp)->
				    nc_dotdottime;
			goto success;
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
		    NULL);
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		goto unlock;
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		if (!wlocked && !CACHE_UPGRADE_LOCK())
			goto wlock;
		cache_zap(ncp);
		CACHE_WUNLOCK();
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
		    dvp, cnp->cn_nameptr, *vpp, ncp);
		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp),
		    *vpp);
		cache_out_ts(ncp, tsp, ticksp);
		goto success;
	}

negative_success:
	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		if (!wlocked && !CACHE_UPGRADE_LOCK())
			goto wlock;
		cache_zap(ncp);
		CACHE_WUNLOCK();
		return (0);
	}

	if (!wlocked && !CACHE_UPGRADE_LOCK())
		goto wlock;
	numneghits++;
	/*
	 * We found a "negative" match, so we shift it to the end of
	 * the "negative" cache entries queue to satisfy LRU.  Also,
	 * check to see if the entry is a whiteout; if so, indicate
	 * this in the componentname.
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
	    nc_get_name(ncp));
	cache_out_ts(ncp, tsp, ticksp);
	CACHE_WUNLOCK();
	return (ENOENT);

wlock:
	/*
	 * We need to update the cache after our lookup, so upgrade to
	 * a write lock and retry the operation.
	 */
	CACHE_RUNLOCK();
	CACHE_WLOCK();
	numupgrades++;
	wlocked = 1;
	goto retry_wlocked;

success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
	 */
	if (dvp == *vpp) {   /* lookup on "." */
		VREF(*vpp);
		if (wlocked)
			CACHE_WUNLOCK();
		else
			CACHE_RUNLOCK();
		/*
		 * When we look up ".", we can still be asked to lock it
		 * differently...
		 */
		ltype = cnp->cn_lkflags & LK_TYPE_MASK;
		if (ltype != VOP_ISLOCKED(*vpp)) {
			if (ltype == LK_EXCLUSIVE) {
				vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
				if ((*vpp)->v_iflag & VI_DOOMED) {
					/* forced unmount */
					vrele(*vpp);
					*vpp = NULL;
					return (ENOENT);
				}
			} else
				vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
		}
		return (-1);
	}
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp, 0);
	}
	VI_LOCK(*vpp);
	if (wlocked)
		CACHE_WUNLOCK();
	else
		CACHE_RUNLOCK();
	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
	if (cnp->cn_flags & ISDOTDOT) {
		vn_lock(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0)
				vput(*vpp);
			*vpp = NULL;
			return (ENOENT);
		}
	}
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
	}
	return (-1);

unlock:
	if (wlocked)
		CACHE_WUNLOCK();
	else
		CACHE_RUNLOCK();
	return (0);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter_time(dvp, vp, cnp, tsp, dtsp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
	struct timespec *tsp;
	struct timespec *dtsp;
{
	struct namecache *ncp, *n2;
	struct namecache_ts *n3;
	struct nchashhead *ncpp;
	uint32_t hash;
	int flag;
	int hold;
	int zap;
	int len;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
	    ("cache_enter: Adding a doomed vnode"));
	VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
	    ("cache_enter: Doomed vnode used as src"));

	if (!doingcache)
		return;

	/*
	 * Avoid blowout in namecache entries.
	 */
	if (numcache >= desiredvnodes * ncsizefactor)
		return;

	flag = 0;
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1)
			return;
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			CACHE_WLOCK();
			/*
			 * If the dotdot entry already exists, just retarget
			 * it to the new parent vnode; otherwise continue
			 * with a new namecache entry allocation.
			 */
			if ((ncp = dvp->v_cache_dd) != NULL &&
			    ncp->nc_flag & NCF_ISDOTDOT) {
				KASSERT(ncp->nc_dvp == dvp,
				    ("wrong isdotdot parent"));
				if (ncp->nc_vp != NULL) {
					TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
					    ncp, nc_dst);
				} else {
					TAILQ_REMOVE(&ncneg, ncp, nc_dst);
					numneg--;
				}
				if (vp != NULL) {
					TAILQ_INSERT_HEAD(&vp->v_cache_dst,
					    ncp, nc_dst);
				} else {
					TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
					numneg++;
				}
				ncp->nc_vp = vp;
				CACHE_WUNLOCK();
				return;
			}
			dvp->v_cache_dd = NULL;
			SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp);
			CACHE_WUNLOCK();
			flag = NCF_ISDOTDOT;
		}
	}

	hold = 0;
	zap = 0;

	/*
	 * Calculate the hash key and setup as much of the new
	 * namecache entry as possible before acquiring the lock.
	 */
	ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	ncp->nc_flag = flag;
	if (tsp != NULL) {
		n3 = (struct namecache_ts *)ncp;
		n3->nc_time = *tsp;
		n3->nc_ticks = ticks;
		n3->nc_flag |= NCF_TS;
		if (dtsp != NULL) {
			n3->nc_dotdottime = *dtsp;
			n3->nc_flag |= NCF_DTS;
		}
	}
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	CACHE_WLOCK();

	/*
	 * See if this vnode or negative entry is already in the cache
	 * with this name.  This can happen with concurrent lookups of
	 * the same path name.
	 */
	ncpp = NCHHASH(hash);
	LIST_FOREACH(n2, ncpp, nc_hash) {
		if (n2->nc_dvp == dvp &&
		    n2->nc_nlen == cnp->cn_namelen &&
		    !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) {
			if (tsp != NULL) {
				KASSERT((n2->nc_flag & NCF_TS) != 0,
				    ("no NCF_TS"));
				n3 = (struct namecache_ts *)n2;
				n3->nc_time =
				    ((struct namecache_ts *)ncp)->nc_time;
				n3->nc_ticks =
				    ((struct namecache_ts *)ncp)->nc_ticks;
				if (dtsp != NULL) {
					n3->nc_dotdottime =
					    ((struct namecache_ts *)ncp)->
					    nc_dotdottime;
					n3->nc_flag |= NCF_DTS;
				}
			}
			CACHE_WUNLOCK();
			cache_free(ncp);
			return;
		}
	}

	if (flag == NCF_ISDOTDOT) {
		/*
		 * See if we are trying to add a ".." entry, but some other
		 * lookup has already populated the v_cache_dd pointer.
		 */
		if (dvp->v_cache_dd != NULL) {
			CACHE_WUNLOCK();
			cache_free(ncp);
			return;
		}
		KASSERT(vp == NULL || vp->v_type == VDIR,
		    ("wrong vnode type %p", vp));
		dvp->v_cache_dd = ncp;
	}

	numcache++;
	if (!vp) {
		numneg++;
		if (cnp->cn_flags & ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITE;
	} else if (vp->v_type == VDIR) {
		if (flag != NCF_ISDOTDOT) {
			/*
			 * For this case, the cache entry maps both the
			 * directory name in it and the name ".." for the
			 * directory's parent.
			 */
			if ((n2 = vp->v_cache_dd) != NULL &&
			    (n2->nc_flag & NCF_ISDOTDOT) != 0)
				cache_zap(n2);
			vp->v_cache_dd = ncp;
		}
	} else {
		vp->v_cache_dd = NULL;
	}

	/*
	 * Insert the new namecache entry into the appropriate chain
	 * within the cache entries table.
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (flag != NCF_ISDOTDOT) {
		if (LIST_EMPTY(&dvp->v_cache_src)) {
			hold = 1;
			numcachehv++;
		}
		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	}

	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
	 */
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
		SDT_PROBE3(vfs, namecache, enter, done, dvp, nc_get_name(ncp),
		    vp);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
		SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
		    nc_get_name(ncp));
	}
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		KASSERT(ncp->nc_vp == NULL, ("ncp %p vp %p on ncneg",
		    ncp, ncp->nc_vp));
		zap = 1;
	}
	if (hold)
		vhold(dvp);
	if (zap)
		cache_zap(ncp);
	CACHE_WUNLOCK();
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);

	cache_zone_small = uma_zcreate("S VFS Cache",
	    sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_small_ts = uma_zcreate("STS VFS Cache",
	    sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache",
	    sizeof(struct namecache) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cache_zone_large_ts = uma_zcreate("LTS VFS Cache",
	    sizeof(struct namecache_ts) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

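/*
 * Resize the namecache hash table for a new value of the maximum vnode
 * count, rehashing every existing entry into the new table.
 */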
void
cache_changesize(int newmaxvnodes)
{
	struct nchashhead *new_nchashtbl, *old_nchashtbl;
	u_long new_nchash, old_nchash;
	struct namecache *ncp;
	uint32_t hash;
	int i;

	new_nchashtbl = hashinit(newmaxvnodes * 2, M_VFSCACHE, &new_nchash);
	/* If same hash table size, nothing to do */
	if (nchash == new_nchash) {
		free(new_nchashtbl, M_VFSCACHE);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * No namecache entry can disappear underneath us while we do
	 * this, since removing an entry means taking it off its hash
	 * chain, which requires the write lock held below.
	 */
	CACHE_WLOCK();
	old_nchashtbl = nchashtbl;
	old_nchash = nchash;
	nchashtbl = new_nchashtbl;
	nchash = new_nchash;
	for (i = 0; i <= old_nchash; i++) {
		while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
			hash = fnv_32_buf(nc_get_name(ncp), ncp->nc_nlen,
			    FNV1_32_INIT);
			hash = fnv_32_buf(&ncp->nc_dvp, sizeof(ncp->nc_dvp),
			    hash);
			LIST_REMOVE(ncp, nc_hash);
			LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
		}
	}
	CACHE_WUNLOCK();
	free(old_nchashtbl, M_VFSCACHE);
}

/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(vp)
	struct vnode *vp;
{

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge, done, vp);
	CACHE_WLOCK();
	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
	if (vp->v_cache_dd != NULL) {
		KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT,
		    ("lost dotdot link"));
		cache_zap(vp->v_cache_dd);
	}
	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
	CACHE_WUNLOCK();
}

/*
 * Invalidate all negative entries for a particular directory vnode.
 */
void
cache_purge_negative(vp)
	struct vnode *vp;
{
	struct namecache *cp, *ncp;

	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
	CACHE_WLOCK();
	LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) {
		if (cp->nc_vp == NULL)
			cache_zap(cp);
	}
	CACHE_WUNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
	CACHE_WLOCK();
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
			if (ncp->nc_dvp->v_mount == mp)
				cache_zap(ncp);
		}
	}
	CACHE_WUNLOCK();
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)
		return (0);
	return (error);
}

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
sys___getcwd(td, uap)
	struct thread *td;
	struct __getcwd_args *uap;
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	struct vnode *cdir, *rdir;
	int error;

	if (disablecwd)
		return (ENODEV);
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	cdir = fdp->fd_cdir;
	VREF(cdir);
	rdir = fdp->fd_rdir;
	VREF(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
	vrele(rdir);
	vrele(cdir);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(bp);
#endif
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name, descr)						\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr)

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls, "Number of fullpath search calls");
STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
STATNODE(numfullpathfail2,
    "Number of fullpath search errors (VOP_VPTOCNP failures)");
STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
STATNODE(numfullpathfound, "Number of successful fullpath calls");

/*
 * Retrieve the full filesystem path that corresponds to a vnode from
 * the name cache (if available).
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	struct vnode *rdir;
	int error;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	rdir = fdp->fd_rdir;
	VREF(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
	vrele(rdir);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

/*
 * This function is similar to vn_fullpath, but it attempts to lookup the
 * pathname relative to the global root mount point.  This is required for the
 * auditing sub-system, as audited pathnames must be absolute, relative to the
 * global root mount point.
 */
int
vn_fullpath_global(struct thread *td, struct vnode *vn,
    char **retbuf, char **freebuf)
{
	char *buf;
	int error;

	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

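/*
 * Resolve one pathname component for *vp: on success *vp is replaced by
 * a referenced parent vnode and the component name is prepended to buf.
 * This wrapper takes the cache lock around vn_vptocnp_locked().
 */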
int
vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
{
	int error;

	CACHE_RLOCK();
	error = vn_vptocnp_locked(vp, cred, buf, buflen);
	if (error == 0)
		CACHE_RUNLOCK();
	return (error);
}

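/*
 * Resolve one pathname component with the cache read-locked: prefer a
 * non-dotdot namecache entry naming *vp, falling back to VOP_VPTOCNP()
 * otherwise.  On success *vp is replaced by its referenced parent and
 * the read lock is held again on return; on error the lock has been
 * dropped and *vp released.
 */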
static int
vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
    u_int *buflen)
{
	struct vnode *dvp;
	struct namecache *ncp;
	int error;

	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	}
	if (ncp != NULL) {
		if (*buflen < ncp->nc_nlen) {
			CACHE_RUNLOCK();
			vrele(*vp);
			numfullpathfail4++;
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    vp, NULL);
			return (error);
		}
		*buflen -= ncp->nc_nlen;
		memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen);
		SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
		    nc_get_name(ncp), vp);
		dvp = *vp;
		*vp = ncp->nc_dvp;
		vref(*vp);
		CACHE_RUNLOCK();
		vrele(dvp);
		CACHE_RLOCK();
		return (0);
	}
	SDT_PROBE1(vfs, namecache, fullpath, miss, vp);

	CACHE_RUNLOCK();
	vn_lock(*vp, LK_SHARED | LK_RETRY);
	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
	vput(*vp);
	if (error) {
		numfullpathfail2++;
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}

	*vp = dvp;
	CACHE_RLOCK();
	if (dvp->v_iflag & VI_DOOMED) {
		/* forced unmount */
		CACHE_RUNLOCK();
		vrele(dvp);
		error = ENOENT;
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}
	/*
	 * *vp has its use count incremented still.
	 */

	return (0);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
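 *
 * Walk from vp towards rdir (or rootvnode), resolving one component at
 * a time with vn_vptocnp_locked() and prepending it to buf; VV_ROOT
 * vnodes are stepped over via mnt_vnodecovered to cross mount points.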
 */
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	int error, slash_prefixed;
#ifdef KDTRACE_HOOKS
	struct vnode *startvp = vp;
#endif
	struct vnode *vp1;

	buflen--;
	buf[buflen] = '\0';
	error = 0;
	slash_prefixed = 0;

	SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
	numfullpathcalls++;
	vref(vp);
	CACHE_RLOCK();
	if (vp->v_type != VDIR) {
		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
		if (error)
			return (error);
		if (buflen == 0) {
			CACHE_RUNLOCK();
			vrele(vp);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	while (vp != rdir && vp != rootvnode) {
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_iflag & VI_DOOMED) {	/* forced unmount */
				CACHE_RUNLOCK();
				vrele(vp);
				error = ENOENT;
				SDT_PROBE3(vfs, namecache, fullpath, return,
				    error, vp, NULL);
				break;
			}
			vp1 = vp->v_mount->mnt_vnodecovered;
			vref(vp1);
			CACHE_RUNLOCK();
			vrele(vp);
			vp = vp1;
			CACHE_RLOCK();
			continue;
		}
		if (vp->v_type != VDIR) {
			CACHE_RUNLOCK();
			vrele(vp);
			numfullpathfail1++;
			error = ENOTDIR;
			SDT_PROBE3(vfs, namecache, fullpath, return,
			    error, vp, NULL);
			break;
		}
		error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
		if (error)
			break;
		if (buflen == 0) {
			CACHE_RUNLOCK();
			vrele(vp);
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    startvp, NULL);
			break;
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	if (error)
		return (error);
	if (!slash_prefixed) {
		if (buflen == 0) {
			CACHE_RUNLOCK();
			vrele(vp);
			numfullpathfail4++;
			SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
			    startvp, NULL);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
	}
	numfullpathfound++;
	CACHE_RUNLOCK();
	vrele(vp);

	SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
	*retbuf = buf + buflen;
	return (0);
}

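/*
 * Find the parent directory of the directory vp, using any non-dotdot
 * namecache entry that names vp.  Returns a referenced, shared-locked
 * parent vnode, or NULL if no usable entry is found.
 */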
struct vnode *
vn_dir_dd_ino(struct vnode *vp)
{
	struct namecache *ncp;
	struct vnode *ddvp;

	ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
	CACHE_RLOCK();
	TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
			continue;
		ddvp = ncp->nc_dvp;
		VI_LOCK(ddvp);
		CACHE_RUNLOCK();
		if (vget(ddvp, LK_INTERLOCK | LK_SHARED | LK_NOWAIT, curthread))
			return (NULL);
		return (ddvp);
	}
	CACHE_RUNLOCK();
	return (NULL);
}

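/*
 * Copy the last component name by which vp is known into buf, at most
 * buflen - 1 characters plus a terminating NUL.  Returns ENOENT if no
 * entry for vp is cached.
 */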
int
vn_commname(struct vnode *vp, char *buf, u_int buflen)
{
	struct namecache *ncp;
	int l;

	CACHE_RLOCK();
	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	if (ncp == NULL) {
		CACHE_RUNLOCK();
		return (ENOENT);
	}
	l = min(ncp->nc_nlen, buflen - 1);
	memcpy(buf, nc_get_name(ncp), l);
	CACHE_RUNLOCK();
	buf[l] = '\0';
	return (0);
}

/* ABI compat shims for old kernel modules. */
#undef cache_enter

void	cache_enter(struct vnode *dvp, struct vnode *vp,
	    struct componentname *cnp);

void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{

	cache_enter_time(dvp, vp, cnp, NULL, NULL);
}

/*
 * This function updates the path string to the vnode's full global path
 * and checks the size of the new path string against the pathlen argument.
 *
 * Requires a locked, referenced vnode and GIANT lock held.
 * Vnode is re-locked on success or ENODEV, otherwise unlocked.
 *
 * If sysctl debug.disablefullpath is set, ENODEV is returned and the
 * vnode is left locked and the path remains untouched.
 *
 * If vp is a directory, the call to vn_fullpath_global() always succeeds
 * because it falls back to the ".." lookup if the namecache lookup fails.
 */
int
vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
    u_int pathlen)
{
	struct nameidata nd;
	struct vnode *vp1;
	char *rpath, *fbuf;
	int error;

	ASSERT_VOP_ELOCKED(vp, __func__);

	/* Return ENODEV if sysctl debug.disablefullpath==1 */
	if (disablefullpath)
		return (ENODEV);

	/* Construct global filesystem path from vp. */
	VOP_UNLOCK(vp, 0);
	error = vn_fullpath_global(td, vp, &rpath, &fbuf);

	if (error != 0) {
		vrele(vp);
		return (error);
	}

	if (strlen(rpath) >= pathlen) {
		vrele(vp);
		error = ENAMETOOLONG;
		goto out;
	}

	/*
	 * Re-lookup the vnode by path to detect a possible rename.
	 * As a side effect, the vnode is relocked.
	 * If the vnode was renamed, return ENOENT.
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, path, td);
	error = namei(&nd);
	if (error != 0) {
		vrele(vp);
		goto out;
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp1 = nd.ni_vp;
	vrele(vp);
	if (vp1 == vp)
		strcpy(path, rpath);
	else {
		vput(vp1);
		error = ENOENT;
	}

out:
	free(fbuf, M_TEMP);
	return (error);
}