/*-
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Socket operations for use by nfs
 */

#include "opt_inet6.h"
#include "opt_kdtrace.h"
#include "opt_kgssapi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>

#include <rpc/rpc.h>

#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfsclient/nfsm_subs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_nfsclient_nfs23_start_probe_func_t
    dtrace_nfsclient_nfs23_start_probe;

dtrace_nfsclient_nfs23_done_probe_func_t
    dtrace_nfsclient_nfs23_done_probe;

/*
 * Registered probes by RPC type.
 */
uint32_t	nfsclient_nfs2_start_probes[NFS_NPROCS];
uint32_t	nfsclient_nfs2_done_probes[NFS_NPROCS];

uint32_t	nfsclient_nfs3_start_probes[NFS_NPROCS];
uint32_t	nfsclient_nfs3_done_probes[NFS_NPROCS];
#endif

static int	nfs_bufpackets = 4;
static int	nfs_reconnects;
static int	nfs3_jukebox_delay = 10;
static int	nfs_skip_wcc_data_onerr = 1;
static int	fake_wchan;	/* dummy tsleep() wait channel for the jukebox delay */

SYSCTL_DECL(_vfs_oldnfs);

SYSCTL_INT(_vfs_oldnfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
    "Buffer reservation size 2 <= x <= 64");
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
    "Number of times the nfs client has had to reconnect");
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW,
    &nfs3_jukebox_delay, 0,
    "Number of seconds to delay a retry after receiving EJUKEBOX");
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW,
    &nfs_skip_wcc_data_onerr, 0,
    "Disable weak cache consistency checking when server returns an error");

static void	nfs_down(struct nfsmount *, struct thread *, const char *,
    int, int);
static void	nfs_up(struct nfsmount *, struct thread *, const char *,
    int, int);
static int	nfs_msg(struct thread *, const char *, const char *, int);

extern int nfsv2_procid[];

struct nfs_cached_auth {
	int		ca_refs; /* refcount, including 1 from the cache */
	uid_t		ca_uid;	 /* uid that corresponds to this auth */
	AUTH		*ca_auth; /* RPC auth handle */
};

/*
 * RTT estimator
 */

static enum nfs_rto_timer_t nfs_proct[NFS_NPROCS] = {
	NFS_DEFAULT_TIMER,	/* NULL */
	NFS_GETATTR_TIMER,	/* GETATTR */
	NFS_DEFAULT_TIMER,	/* SETATTR */
	NFS_LOOKUP_TIMER,	/* LOOKUP */
	NFS_GETATTR_TIMER,	/* ACCESS */
	NFS_READ_TIMER,		/* READLINK */
	NFS_READ_TIMER,		/* READ */
	NFS_WRITE_TIMER,	/* WRITE */
	NFS_DEFAULT_TIMER,	/* CREATE */
	NFS_DEFAULT_TIMER,	/* MKDIR */
	NFS_DEFAULT_TIMER,	/* SYMLINK */
	NFS_DEFAULT_TIMER,	/* MKNOD */
	NFS_DEFAULT_TIMER,	/* REMOVE */
	NFS_DEFAULT_TIMER,	/* RMDIR */
	NFS_DEFAULT_TIMER,	/* RENAME */
	NFS_DEFAULT_TIMER,	/* LINK */
	NFS_READ_TIMER,		/* READDIR */
	NFS_READ_TIMER,		/* READDIRPLUS */
	NFS_DEFAULT_TIMER,	/* FSSTAT */
	NFS_DEFAULT_TIMER,	/* FSINFO */
	NFS_DEFAULT_TIMER,	/* PATHCONF */
	NFS_DEFAULT_TIMER,	/* COMMIT */
	NFS_DEFAULT_TIMER,	/* NOOP */
};
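
/*
 * Note: nfs_proct[] is indexed directly by NFS procedure number (see
 * nfs_rto_timer() below), so it must contain exactly NFS_NPROCS entries
 * in procedure-number order.
 */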

/*
 * Choose the correct RTT timer for this NFS procedure.
 */
static inline enum nfs_rto_timer_t
nfs_rto_timer(u_int32_t procnum)
{

	return (nfs_proct[procnum]);
}

/*
 * Initialize the RTT estimator state for a new mount point.
 */
static void
nfs_init_rtt(struct nfsmount *nmp)
{
	int i;

	for (i = 0; i < NFS_MAX_TIMER; i++) {
		nmp->nm_timers[i].rt_srtt = hz;
		nmp->nm_timers[i].rt_deviate = 0;
		nmp->nm_timers[i].rt_rtxcur = hz;
	}
}

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if an error occurs.
 */
int
nfs_connect(struct nfsmount *nmp)
{
	int rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct ucred *origcred;
	struct thread *td = curthread;
	CLIENT *client;
	struct netconfig *nconf;
	rpcvers_t vers;
	int one = 1, retries;
	struct timeval timo;

	/*
	 * We need to establish the socket using the credentials of
	 * the mountpoint.  Some parts of this process (such as
	 * sobind() and soconnect()) will use the current thread's
	 * credential instead of the socket credential.  To work
	 * around this, temporarily change the current thread's
	 * credential to that of the mountpoint.
	 *
	 * XXX: It would be better to explicitly pass the correct
	 * credential to sobind() and soconnect().
	 */
	origcred = td->td_ucred;
	td->td_ucred = nmp->nm_mountp->mnt_cred;
	saddr = nmp->nm_nam;

	vers = NFS_VER2;
	if (nmp->nm_flag & NFSMNT_NFSV3)
		vers = NFS_VER3;
	else if (nmp->nm_flag & NFSMNT_NFSV4)
		vers = NFS_VER4;
	if (saddr->sa_family == AF_INET)
		if (nmp->nm_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp");
		else
			nconf = getnetconfigent("tcp");
	else
		if (nmp->nm_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp6");
		else
			nconf = getnetconfigent("tcp6");

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;
	mtx_lock(&nmp->nm_mtx);
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	mtx_unlock(&nmp->nm_mtx);

	client = clnt_reconnect_create(nconf, saddr, NFS_PROG, vers,
	    sndreserve, rcvreserve);
	CLNT_CONTROL(client, CLSET_WAITCHAN, "nfsreq");
	if (nmp->nm_flag & NFSMNT_INT)
		CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
	if (nmp->nm_flag & NFSMNT_RESVPORT)
		CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
	if ((nmp->nm_flag & NFSMNT_SOFT) != 0) {
		if (nmp->nm_sotype == SOCK_DGRAM)
			/*
			 * For UDP, the large timeout for a reconnect will
			 * be set to "nm_retry * nm_timeo / 2", so we only
			 * want to do 2 reconnect timeout retries.
			 */
			retries = 2;
		else
			retries = nmp->nm_retry;
	} else
		retries = INT_MAX;
	CLNT_CONTROL(client, CLSET_RETRIES, &retries);

	/*
	 * For UDP, there are 2 timeouts:
	 * - CLSET_RETRY_TIMEOUT sets the initial timeout for the timer
	 *   that does a retransmit of an RPC request using the same socket
	 *   and xid. This is what you normally want to do, since NFS
	 *   servers depend on "same xid" for their Duplicate Request Cache.
	 * - The timeout specified in CLNT_CALL_MBUF(), which specifies when
	 *   retransmits on the same socket should fail and a fresh socket
	 *   should be created. Each of these timeouts counts as one
	 *   CLSET_RETRIES, as set above.
	 * Set the initial retransmit timeout for UDP. This timeout doesn't
	 * exist for TCP and the following call just fails, which is ok.
	 */
	timo.tv_sec = nmp->nm_timeo / NFS_HZ;
	timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
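	/*
	 * For example, nm_timeo == NFS_HZ (one second worth of NFS ticks)
	 * yields timo = { .tv_sec = 1, .tv_usec = 0 }.
	 */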
	CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, &timo);

	mtx_lock(&nmp->nm_mtx);
	if (nmp->nm_client) {
		/*
		 * Someone else already connected.
		 */
		CLNT_RELEASE(client);
	} else
		nmp->nm_client = client;

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (!(nmp->nm_flag & NFSMNT_NOCONN)) {
		mtx_unlock(&nmp->nm_mtx);
		CLNT_CONTROL(client, CLSET_CONNECT, &one);
	} else
		mtx_unlock(&nmp->nm_mtx);

	/* Restore current thread's credentials. */
	td->td_ucred = origcred;

	mtx_lock(&nmp->nm_mtx);
	/* Initialize the RTT estimator state. */
	nfs_init_rtt(nmp);
	mtx_unlock(&nmp->nm_mtx);
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	CLIENT *client;

	mtx_lock(&nmp->nm_mtx);
	if (nmp->nm_client) {
		client = nmp->nm_client;
		nmp->nm_client = NULL;
		mtx_unlock(&nmp->nm_mtx);
		rpc_gss_secpurge_call(client);
		CLNT_CLOSE(client);
		CLNT_RELEASE(client);
	} else
		mtx_unlock(&nmp->nm_mtx);
}

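/*
 * With the kernel RPC transport, a "safe" disconnect is simply
 * nfs_disconnect().
 */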
void
nfs_safedisconnect(struct nfsmount *nmp)
{

	nfs_disconnect(nmp);
}

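/*
 * Pick an AUTH handle for this mount and credential: RPCSEC_GSS
 * (Kerberos v5) when the mount is configured for it, otherwise plain
 * AUTH_SYS.  The caller must AUTH_DESTROY() the returned handle when
 * it is done; nfs_request() does so after the RPC completes.
 */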
static AUTH *
nfs_getauth(struct nfsmount *nmp, struct ucred *cred)
{
	rpc_gss_service_t svc;
	AUTH *auth;

	switch (nmp->nm_secflavor) {
	case RPCSEC_GSS_KRB5:
	case RPCSEC_GSS_KRB5I:
	case RPCSEC_GSS_KRB5P:
		if (!nmp->nm_mech_oid)
			if (!rpc_gss_mech_to_oid_call("kerberosv5",
			    &nmp->nm_mech_oid))
				return (NULL);
		if (nmp->nm_secflavor == RPCSEC_GSS_KRB5)
			svc = rpc_gss_svc_none;
		else if (nmp->nm_secflavor == RPCSEC_GSS_KRB5I)
			svc = rpc_gss_svc_integrity;
		else
			svc = rpc_gss_svc_privacy;
		auth = rpc_gss_secfind_call(nmp->nm_client, cred,
		    nmp->nm_principal, nmp->nm_mech_oid, svc);
		if (auth)
			return (auth);
		/* fallthrough */
	case AUTH_SYS:
	default:
		return (authunix_create(cred));

	}
}

/*
 * Callback from the RPC code to generate up/down notifications.
 */

struct nfs_feedback_arg {
	struct nfsmount *nf_mount;
	int		nf_lastmsg;	/* last tprintf */
	int		nf_tprintfmsg;
	struct thread	*nf_td;
};

static void
nfs_feedback(int type, int proc, void *arg)
{
	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
	struct nfsmount *nmp = nf->nf_mount;
	time_t now;

	switch (type) {
	case FEEDBACK_REXMIT2:
	case FEEDBACK_RECONNECT:
		now = time_uptime;
		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now) {
			nfs_down(nmp, nf->nf_td,
			    "not responding", 0, NFSSTA_TIMEO);
			nf->nf_tprintfmsg = TRUE;
			nf->nf_lastmsg = now;
		}
		break;

	case FEEDBACK_OK:
		nfs_up(nf->nf_mount, nf->nf_td,
		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
		break;
	}
}

/*
 * nfs_request - issue an NFS RPC to the server for the given vnode:
 *	- set up the RPC call extras (auth handle, feedback callback,
 *	  RTT timers)
 *	- hand the request mbuf list to the kernel RPC layer via
 *	  CLNT_CALL_MBUF()
 *	- map the RPC status to an errno, decode the leading NFS status
 *	  word and return with the reply pointed to by mrep, or an error
 * nb: always frees up the mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep;
	u_int32_t *tl;
	struct nfsmount *nmp;
	struct mbuf *md;
	time_t waituntil;
	caddr_t dpos;
	int error = 0, timeo;
	AUTH *auth = NULL;
	enum nfs_rto_timer_t timer;
	struct nfs_feedback_arg nf;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct timeval timo;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mreq);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	bzero(&nf, sizeof(struct nfs_feedback_arg));
	nf.nf_mount = nmp;
	nf.nf_td = td;
	nf.nf_lastmsg = time_uptime -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));

	/*
	 * XXX if not already connected call nfs_connect now.  Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (!nmp->nm_client)
		nfs_connect(nmp);

	auth = nfs_getauth(nmp, cred);
	if (!auth) {
		m_freem(mreq);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;

	ext.rc_feedback = nfs_feedback;
	ext.rc_feedback_arg = &nf;

	/*
	 * Use a conservative timeout for RPCs other than getattr,
	 * lookup, read or write.  The justification for handling "other"
	 * RPCs this way is that they happen so infrequently that the
	 * timer estimate would probably be stale.  Also, since many of
	 * these RPCs are non-idempotent, a conservative timeout is
	 * desired.
	 */
	timer = nfs_rto_timer(procnum);
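	/*
	 * nm_timers[] keeps adaptive RTT state only for the non-default
	 * timer classes, hence the "timer - 1" index below; default-class
	 * RPCs rely solely on the fixed CLNT_CALL_MBUF() timeout.
	 */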
	if (timer != NFS_DEFAULT_TIMER)
		ext.rc_timers = &nmp->nm_timers[timer - 1];
	else
		ext.rc_timers = NULL;

#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_start_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_start_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_start_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_start_probe)(probe_id, vp,
			    mreq, cred, probe_procnum);
	}
#endif

	nfsstats.rpcrequests++;
tryagain:
	/*
	 * This timeout specifies when a new socket should be created,
	 * along with new xid values. For UDP, this should be done
	 * infrequently, since retransmits of RPC requests should normally
	 * use the same xid.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM) {
		if ((nmp->nm_flag & NFSMNT_SOFT) != 0) {
			/*
			 * CLSET_RETRIES is set to 2, so this should be half
			 * of the total timeout required.
			 */
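			/*
			 * For example, with nm_retry == 4 and nm_timeo ==
			 * NFS_HZ (one second of NFS ticks), this yields a
			 * two-second timeout per fresh-socket attempt.
			 */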
			timeo = nmp->nm_retry * nmp->nm_timeo / 2;
			if (timeo < 1)
				timeo = 1;
			timo.tv_sec = timeo / NFS_HZ;
			timo.tv_usec = (timeo % NFS_HZ) * 1000000 / NFS_HZ;
		} else {
			/* For UDP hard mounts, use a large value. */
			timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
			timo.tv_usec = 0;
		}
	} else {
		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
	}
	mrep = NULL;
	stat = CLNT_CALL_MBUF(nmp->nm_client, &ext,
	    (nmp->nm_flag & NFSMNT_NFSV3) ? procnum : nfsv2_procid[procnum],
	    mreq, &mrep, timo);

	/*
	 * Map the RPC status returned by CLNT_CALL_MBUF() to an errno
	 * value and update the RPC statistics.
	 */
	if (stat == RPC_SUCCESS)
		error = 0;
	else if (stat == RPC_TIMEDOUT) {
		nfsstats.rpctimeouts++;
		error = ETIMEDOUT;
	} else if (stat == RPC_VERSMISMATCH) {
		nfsstats.rpcinvalid++;
		error = EOPNOTSUPP;
	} else if (stat == RPC_PROGVERSMISMATCH) {
		nfsstats.rpcinvalid++;
		error = EPROTONOSUPPORT;
	} else if (stat == RPC_INTR) {
		error = EINTR;
	} else {
		nfsstats.rpcinvalid++;
		error = EACCES;
	}
	if (error)
		goto nfsmout;

	KASSERT(mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	error = nfs_realign(&mrep, M_NOWAIT);
	if (error == ENOMEM) {
		m_freem(mrep);
		AUTH_DESTROY(auth);
		nfsstats.rpcinvalid++;
		return (error);
	}

	md = mrep;
	dpos = mtod(mrep, caddr_t);
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	if (*tl != 0) {
		error = fxdr_unsigned(int, *tl);
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    error == NFSERR_TRYLATER) {
			m_freem(mrep);
			error = 0;
			waituntil = time_second + nfs3_jukebox_delay;
			while (time_second < waituntil)
				(void)tsleep(&fake_wchan, PSOCK, "nqnfstry",
				    hz);
			goto tryagain;
		}
		/*
		 * Make sure NFSERR_RETERR isn't bogusly set by a server
		 * such as amd. (No actual NFS error has bit 31 set.)
		 */
		error &= ~NFSERR_RETERR;

		/*
		 * If the File Handle was stale, invalidate the lookup
		 * cache, just in case.
		 */
		if (error == ESTALE)
			nfs_purgecache(vp);
		/*
		 * Skip wcc data on non-ENOENT NFS errors for now.
		 * NetApp filers return corrupt postop attrs in the
		 * wcc data for NFS err EROFS.  Not sure if they could
		 * return corrupt postop attrs for other errors.
		 * Blocking ENOENT post-op attributes breaks negative
		 * name caching, so always allow it through.
		 */
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    (!nfs_skip_wcc_data_onerr || error == ENOENT)) {
			*mrp = mrep;
			*mdp = md;
			*dposp = dpos;
			error |= NFSERR_RETERR;
		} else
			m_freem(mrep);
		goto nfsmout;
	}

#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_done_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_done_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_done_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_done_probe)(probe_id, vp,
			    mreq, cred, probe_procnum, 0);
	}
#endif
	m_freem(mreq);
	*mrp = mrep;
	*mdp = md;
	*dposp = dpos;
	AUTH_DESTROY(auth);
	return (0);

nfsmout:
#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_done_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_done_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_done_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_done_probe)(probe_id, vp,
			    mreq, cred, probe_procnum, error);
	}
#endif
	m_freem(mreq);
	if (auth)
		AUTH_DESTROY(auth);
	return (error);
}

/*
 * Cancel any outstanding RPCs for this mount.  This is used by forced
 * unmounts; closing the RPC client terminates the calls in progress.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{

	if (nmp->nm_client)
		CLNT_CLOSE(nmp->nm_client);
	return (0);
}

/*
 * Any signal that can interrupt an NFS operation in an intr mount
 * should be added to this set.  SIGSTOP and SIGKILL cannot be masked.
 */
int nfs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGQUIT
};

/*
 * Check to see if one of the signals in our subset is pending on
 * the process (in an intr mount).
 */
static int
nfs_sig_pending(sigset_t set)
{
	int i;

	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
		if (SIGISMEMBER(set, nfs_sig_set[i]))
			return (1);
	return (0);
}

/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * a thread's td_sigmask during an RPC call (for example).  These are
 * also used in other places in the NFS client that might tsleep().
 */
void
nfs_set_sigmask(struct thread *td, sigset_t *oldset)
{
	sigset_t newset;
	int i;
	struct proc *p;

	SIGFILLSET(newset);
	if (td == NULL)
		td = curthread; /* XXX */
	p = td->td_proc;
	/* Remove the NFS set of signals from newset. */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the process, i.e. remove the signal from the
		 * temporary signalmask only if it wasn't already
		 * in p_sigmask.
		 */
		if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
			SIGDELSET(newset, nfs_sig_set[i]);
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset,
	    SIGPROCMASK_PROC_LOCKED);
	PROC_UNLOCK(p);
}

void
nfs_restore_sigmask(struct thread *td, sigset_t *set)
{
	if (td == NULL)
		td = curthread; /* XXX */
	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
}

/*
 * NFS wrapper to msleep() that installs a new signal mask via
 * nfs_set_sigmask() and restores the old one after msleep() returns.
 */
int
nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority,
    char *wmesg, int timo)
{
	sigset_t oldset;
	int error;

	if ((priority & PCATCH) == 0)
		return (msleep(ident, mtx, priority, wmesg, timo));
	if (td == NULL)
		td = curthread; /* XXX */
	nfs_set_sigmask(td, &oldset);
	error = msleep(ident, mtx, priority, wmesg, timo);
	nfs_restore_sigmask(td, &oldset);
	return (error);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct thread *td)
{
	struct proc *p;
	sigset_t tmpset;

	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EIO);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (td == NULL)
		return (0);
	p = td->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETOR(tmpset, td->td_siglist);
	SIGSETNAND(tmpset, td->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
	    && nfs_sig_pending(tmpset)) {
		PROC_UNLOCK(p);
		return (EINTR);
	}
	PROC_UNLOCK(p);
	return (0);
}

static int
nfs_msg(struct thread *td, const char *server, const char *msg, int error)
{
	struct proc *p;

	p = td ? td->td_proc : NULL;
	if (error)
		tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
		    msg, error);
	else
		tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
	return (0);
}

static void
nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
    int error, int flags)
{
	if (nmp == NULL)
		return;
	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 0);
	} else
		mtx_unlock(&nmp->nm_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_LOCKTIMEO) &&
	    !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 0);
	} else
		mtx_unlock(&nmp->nm_mtx);
	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
}

static void
nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
    int flags, int tprintfmsg)
{
	if (nmp == NULL)
		return;
	if (tprintfmsg)
		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);

	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 1);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_LOCKTIMEO) &&
	    (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 1);
	} else
		mtx_unlock(&nmp->nm_mtx);
}