nfs_commonkrpc.c revision 338308
1/*-
2 * Copyright (c) 1989, 1991, 1993, 1995
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/10/sys/fs/nfs/nfs_commonkrpc.c 338308 2018-08-24 22:48:19Z rmacklem $");
36
37/*
38 * Socket operations for use by nfs
39 */
40
41#include "opt_kdtrace.h"
42#include "opt_kgssapi.h"
43#include "opt_nfs.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/kernel.h>
48#include <sys/limits.h>
49#include <sys/lock.h>
50#include <sys/malloc.h>
51#include <sys/mbuf.h>
52#include <sys/mount.h>
53#include <sys/mutex.h>
54#include <sys/proc.h>
55#include <sys/signalvar.h>
56#include <sys/syscallsubr.h>
57#include <sys/sysctl.h>
58#include <sys/syslog.h>
59#include <sys/vnode.h>
60
61#include <rpc/rpc.h>
62#include <rpc/krpc.h>
63
64#include <kgssapi/krb5/kcrypto.h>
65
66#include <fs/nfs/nfsport.h>
67
68#ifdef KDTRACE_HOOKS
69#include <sys/dtrace_bsd.h>
70
71dtrace_nfsclient_nfs23_start_probe_func_t
72		dtrace_nfscl_nfs234_start_probe;
73
74dtrace_nfsclient_nfs23_done_probe_func_t
75		dtrace_nfscl_nfs234_done_probe;
76
77/*
78 * Registered probes by RPC type.
79 */
80uint32_t	nfscl_nfs2_start_probes[NFSV41_NPROCS + 1];
81uint32_t	nfscl_nfs2_done_probes[NFSV41_NPROCS + 1];
82
83uint32_t	nfscl_nfs3_start_probes[NFSV41_NPROCS + 1];
84uint32_t	nfscl_nfs3_done_probes[NFSV41_NPROCS + 1];
85
86uint32_t	nfscl_nfs4_start_probes[NFSV41_NPROCS + 1];
87uint32_t	nfscl_nfs4_done_probes[NFSV41_NPROCS + 1];
88#endif
89
90NFSSTATESPINLOCK;
91NFSREQSPINLOCK;
92NFSDLOCKMUTEX;
93NFSCLSTATEMUTEX;
94extern struct nfsstats newnfsstats;
95extern struct nfsreqhead nfsd_reqq;
96extern int nfscl_ticks;
97extern void (*ncl_call_invalcaches)(struct vnode *);
98extern int nfs_numnfscbd;
99extern int nfscl_debuglevel;
100
101SVCPOOL		*nfscbd_pool;
102static int	nfsrv_gsscallbackson = 0;
103static int	nfs_bufpackets = 4;
104static int	nfs_reconnects;
105static int	nfs3_jukebox_delay = 10;
106static int	nfs_skip_wcc_data_onerr = 1;
107
108SYSCTL_DECL(_vfs_nfs);
109
110SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
111    "Buffer reservation size, clamped to the range 2 to 64");
112SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
113    "Number of times the nfs client has had to reconnect");
114SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
115    "Number of seconds to delay a retry after receiving EJUKEBOX");
116SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
117    "Disable weak cache consistency checking when server returns an error");
118
119static void	nfs_down(struct nfsmount *, struct thread *, const char *,
120    int, int);
121static void	nfs_up(struct nfsmount *, struct thread *, const char *,
122    int, int);
123static int	nfs_msg(struct thread *, const char *, const char *, int);
124
125struct nfs_cached_auth {
126	int		ca_refs; /* refcount, including 1 from the cache */
127	uid_t		ca_uid;	 /* uid that corresponds to this auth */
128	AUTH		*ca_auth; /* RPC auth handle */
129};
130
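/*
 * Map NFSv3 procedure numbers to their NFSv2 equivalents, indexed by the
 * NFSv3 procedure number.  Procedures with no NFSv2 counterpart (such as
 * Access, ReaddirPlus and Commit) map to NFSV2PROC_NOOP.
 */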
131static int nfsv2_procid[NFS_V3NPROCS] = {
132	NFSV2PROC_NULL,
133	NFSV2PROC_GETATTR,
134	NFSV2PROC_SETATTR,
135	NFSV2PROC_LOOKUP,
136	NFSV2PROC_NOOP,
137	NFSV2PROC_READLINK,
138	NFSV2PROC_READ,
139	NFSV2PROC_WRITE,
140	NFSV2PROC_CREATE,
141	NFSV2PROC_MKDIR,
142	NFSV2PROC_SYMLINK,
143	NFSV2PROC_CREATE,
144	NFSV2PROC_REMOVE,
145	NFSV2PROC_RMDIR,
146	NFSV2PROC_RENAME,
147	NFSV2PROC_LINK,
148	NFSV2PROC_READDIR,
149	NFSV2PROC_NOOP,
150	NFSV2PROC_STATFS,
151	NFSV2PROC_NOOP,
152	NFSV2PROC_NOOP,
153	NFSV2PROC_NOOP,
154};
155
156/*
157 * Initialize sockets and congestion for a new NFS connection.
158 * We do not free the sockaddr on error.
159 */
160int
161newnfs_connect(struct nfsmount *nmp, struct nfssockreq *nrp,
162    struct ucred *cred, NFSPROC_T *p, int callback_retry_mult)
163{
164	int rcvreserve, sndreserve;
165	int pktscale, pktscalesav;
166	struct sockaddr *saddr;
167	struct ucred *origcred;
168	CLIENT *client;
169	struct netconfig *nconf;
170	struct socket *so;
171	int one = 1, retries, error = 0;
172	struct thread *td = curthread;
173	SVCXPRT *xprt;
174	struct timeval timo;
175
176	/*
177	 * We need to establish the socket using the credentials of
178	 * the mountpoint.  Some parts of this process (such as
179	 * sobind() and soconnect()) will use the current thread's
180	 * credential instead of the socket credential.  To work
181	 * around this, temporarily change the current thread's
182	 * credential to that of the mountpoint.
183	 *
184	 * XXX: It would be better to explicitly pass the correct
185	 * credential to sobind() and soconnect().
186	 */
187	origcred = td->td_ucred;
188
189	/*
190	 * Use the credential in nr_cred, if not NULL.
191	 */
192	if (nrp->nr_cred != NULL)
193		td->td_ucred = nrp->nr_cred;
194	else
195		td->td_ucred = cred;
196	saddr = nrp->nr_nam;
197
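	/*
	 * Pick the netconfig entry that matches the address family and
	 * socket type of the server address: udp/tcp for IPv4 and
	 * udp6/tcp6 otherwise.
	 */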
198	if (saddr->sa_family == AF_INET)
199		if (nrp->nr_sotype == SOCK_DGRAM)
200			nconf = getnetconfigent("udp");
201		else
202			nconf = getnetconfigent("tcp");
203	else
204		if (nrp->nr_sotype == SOCK_DGRAM)
205			nconf = getnetconfigent("udp6");
206		else
207			nconf = getnetconfigent("tcp6");
208
209	pktscale = nfs_bufpackets;
210	if (pktscale < 2)
211		pktscale = 2;
212	if (pktscale > 64)
213		pktscale = 64;
214	pktscalesav = pktscale;
215	/*
216	 * soreserve() can fail if sb_max is too small, so shrink pktscale
217	 * and try again if there is an error.
218	 * Print a log message suggesting increasing sb_max.
219	 * Creating a socket and probing soreserve() here is necessary
220	 * because, if the reservation sizes are too large, the connection
221	 * would appear to work until a large send is attempted and then
222	 * the krpc code would loop on it.
223	 */
224	so = NULL;
225	saddr = NFSSOCKADDR(nrp->nr_nam, struct sockaddr *);
226	error = socreate(saddr->sa_family, &so, nrp->nr_sotype,
227	    nrp->nr_soproto, td->td_ucred, td);
228	if (error) {
229		td->td_ucred = origcred;
230		goto out;
231	}
232	do {
233	    if (error != 0 && pktscale > 2) {
234		if (nmp != NULL && nrp->nr_sotype == SOCK_STREAM &&
235		    pktscale == pktscalesav)
236		    printf("Consider increasing kern.ipc.maxsockbuf\n");
237		pktscale--;
238	    }
239	    if (nrp->nr_sotype == SOCK_DGRAM) {
240		if (nmp != NULL) {
241			sndreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
242			    pktscale;
243			rcvreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
244			    pktscale;
245		} else {
246			sndreserve = rcvreserve = 1024 * pktscale;
247		}
248	    } else {
249		if (nrp->nr_sotype != SOCK_STREAM)
250			panic("nfscon sotype");
251		if (nmp != NULL) {
252			sndreserve = (NFS_MAXBSIZE + NFS_MAXXDR +
253			    sizeof (u_int32_t)) * pktscale;
254			rcvreserve = (NFS_MAXBSIZE + NFS_MAXXDR +
255			    sizeof (u_int32_t)) * pktscale;
256		} else {
257			sndreserve = rcvreserve = 1024 * pktscale;
258		}
259	    }
260	    error = soreserve(so, sndreserve, rcvreserve);
261	    if (error != 0 && nmp != NULL && nrp->nr_sotype == SOCK_STREAM &&
262		pktscale <= 2)
263		printf("Must increase kern.ipc.maxsockbuf or reduce"
264		    " rsize, wsize\n");
265	} while (error != 0 && pktscale > 2);
266	soclose(so);
267	if (error) {
268		td->td_ucred = origcred;
269		goto out;
270	}
271
272	client = clnt_reconnect_create(nconf, saddr, nrp->nr_prog,
273	    nrp->nr_vers, sndreserve, rcvreserve);
274	CLNT_CONTROL(client, CLSET_WAITCHAN, "newnfsreq");
275	if (nmp != NULL) {
276		if ((nmp->nm_flag & NFSMNT_INT))
277			CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
278		if ((nmp->nm_flag & NFSMNT_RESVPORT))
279			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
280		if (NFSHASSOFT(nmp)) {
281			if (nmp->nm_sotype == SOCK_DGRAM)
282				/*
283				 * For UDP, the large timeout for a reconnect
284				 * will be set to "nm_retry * nm_timeo / 2", so
285				 * we only want to do 2 reconnect timeout
286				 * retries.
287				 */
288				retries = 2;
289			else
290				retries = nmp->nm_retry;
291		} else
292			retries = INT_MAX;
293		/* cred == NULL for DS connects. */
294		if (NFSHASNFSV4N(nmp) && cred != NULL) {
295			/*
296			 * Make sure the nfscbd_pool doesn't get destroyed
297			 * while doing this.
298			 */
299			NFSD_LOCK();
300			if (nfs_numnfscbd > 0) {
301				nfs_numnfscbd++;
302				NFSD_UNLOCK();
303				xprt = svc_vc_create_backchannel(nfscbd_pool);
304				CLNT_CONTROL(client, CLSET_BACKCHANNEL, xprt);
305				NFSD_LOCK();
306				nfs_numnfscbd--;
307				if (nfs_numnfscbd == 0)
308					wakeup(&nfs_numnfscbd);
309			}
310			NFSD_UNLOCK();
311		}
312	} else {
313		/*
314		 * Three cases:
315		 * - Null RPC callback to client
316		 * - Non-Null RPC callback to client, wait a little longer
317		 * - upcalls to nfsuserd and gssd (clp == NULL)
318		 */
319		if (callback_retry_mult == 0) {
320			retries = NFSV4_UPCALLRETRY;
321			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
322		} else {
323			retries = NFSV4_CALLBACKRETRY * callback_retry_mult;
324		}
325	}
326	CLNT_CONTROL(client, CLSET_RETRIES, &retries);
327
328	if (nmp != NULL) {
329		/*
330		 * For UDP, there are 2 timeouts:
331		 * - CLSET_RETRY_TIMEOUT sets the initial timeout for the timer
332		 *   that does a retransmit of an RPC request using the same
333		 *   socket and xid. This is what you normally want to do,
334		 *   since NFS servers depend on "same xid" for their
335		 *   Duplicate Request Cache.
336		 * - the timeout passed to CLNT_CALL_MBUF(), which determines
337		 *   when retransmits on the same socket should give up and a
338		 *   fresh socket be created. Each of these timeouts counts as one
339		 *   CLSET_RETRIES as set above.
340		 * Set the initial retransmit timeout for UDP. This timeout
341		 * doesn't exist for TCP and the following call just fails,
342		 * which is ok.
343		 */
344		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
345		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
346		CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, &timo);
347	}
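	/*
	 * Note: nm_timeo is expressed in NFS_HZ ticks; the conversion above
	 * turns it into a struct timeval.  For example, assuming NFS_HZ is
	 * 10 (illustrative value only), an nm_timeo of 25 ticks becomes
	 * 2.5 seconds.
	 */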
348
349	mtx_lock(&nrp->nr_mtx);
350	if (nrp->nr_client != NULL) {
351		mtx_unlock(&nrp->nr_mtx);
352		/*
353		 * Someone else already connected.
354		 */
355		CLNT_RELEASE(client);
356	} else {
357		nrp->nr_client = client;
358		/*
359		 * Protocols that do not require connections may be optionally
360		 * left unconnected for servers that reply from a port other
361		 * than NFS_PORT.
362		 */
363		if (nmp == NULL || (nmp->nm_flag & NFSMNT_NOCONN) == 0) {
364			mtx_unlock(&nrp->nr_mtx);
365			CLNT_CONTROL(client, CLSET_CONNECT, &one);
366		} else
367			mtx_unlock(&nrp->nr_mtx);
368	}
369
370
371	/* Restore current thread's credentials. */
372	td->td_ucred = origcred;
373
374out:
375	NFSEXITCODE(error);
376	return (error);
377}
378
379/*
380 * NFS disconnect. Clean up and unlink.
381 */
382void
383newnfs_disconnect(struct nfssockreq *nrp)
384{
385	CLIENT *client;
386
387	mtx_lock(&nrp->nr_mtx);
388	if (nrp->nr_client != NULL) {
389		client = nrp->nr_client;
390		nrp->nr_client = NULL;
391		mtx_unlock(&nrp->nr_mtx);
392		rpc_gss_secpurge_call(client);
393		CLNT_CLOSE(client);
394		CLNT_RELEASE(client);
395	} else {
396		mtx_unlock(&nrp->nr_mtx);
397	}
398}
399
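/*
 * Return an RPC AUTH handle for the requested security flavour.  For the
 * RPCSEC_GSS Kerberos flavours an existing context is looked up (or a new
 * one created when a client principal is supplied); if that fails, or for
 * AUTH_SYS, fall back to an AUTH_UNIX handle built from the credentials.
 */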
400static AUTH *
401nfs_getauth(struct nfssockreq *nrp, int secflavour, char *clnt_principal,
402    char *srv_principal, gss_OID mech_oid, struct ucred *cred)
403{
404	rpc_gss_service_t svc;
405	AUTH *auth;
406
407	switch (secflavour) {
408	case RPCSEC_GSS_KRB5:
409	case RPCSEC_GSS_KRB5I:
410	case RPCSEC_GSS_KRB5P:
411		if (!mech_oid) {
412			if (!rpc_gss_mech_to_oid_call("kerberosv5", &mech_oid))
413				return (NULL);
414		}
415		if (secflavour == RPCSEC_GSS_KRB5)
416			svc = rpc_gss_svc_none;
417		else if (secflavour == RPCSEC_GSS_KRB5I)
418			svc = rpc_gss_svc_integrity;
419		else
420			svc = rpc_gss_svc_privacy;
421
422		if (clnt_principal == NULL)
423			auth = rpc_gss_secfind_call(nrp->nr_client, cred,
424			    srv_principal, mech_oid, svc);
425		else {
426			auth = rpc_gss_seccreate_call(nrp->nr_client, cred,
427			    clnt_principal, srv_principal, "kerberosv5",
428			    svc, NULL, NULL, NULL);
429			return (auth);
430		}
431		if (auth != NULL)
432			return (auth);
433		/* fallthrough */
434	case AUTH_SYS:
435	default:
436		return (authunix_create(cred));
437
438	}
439}
440
441/*
442 * Callback from the RPC code to generate up/down notifications.
443 */
444
445struct nfs_feedback_arg {
446	struct nfsmount *nf_mount;
447	int		nf_lastmsg;	/* last tprintf */
448	int		nf_tprintfmsg;
449	struct thread	*nf_td;
450};
451
452static void
453nfs_feedback(int type, int proc, void *arg)
454{
455	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
456	struct nfsmount *nmp = nf->nf_mount;
457	time_t now;
458
459	switch (type) {
460	case FEEDBACK_REXMIT2:
461	case FEEDBACK_RECONNECT:
462		now = NFSD_MONOSEC;
463		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now) {
464			nfs_down(nmp, nf->nf_td,
465			    "not responding", 0, NFSSTA_TIMEO);
466			nf->nf_tprintfmsg = TRUE;
467			nf->nf_lastmsg = now;
468		}
469		break;
470
471	case FEEDBACK_OK:
472		nfs_up(nf->nf_mount, nf->nf_td,
473		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
474		break;
475	}
476}
477
478/*
479 * newnfs_request - goes something like this
480 *	- does the rpc by calling the krpc layer
481 *	- breaks down the rpc header and returns with the nfs reply
482 * nb: always frees up the nd_mreq mbuf list
483 */
484int
485newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
486    struct nfsclient *clp, struct nfssockreq *nrp, vnode_t vp,
487    struct thread *td, struct ucred *cred, u_int32_t prog, u_int32_t vers,
488    u_char *retsum, int toplevel, u_int64_t *xidp, struct nfsclsession *dssep)
489{
490	uint32_t retseq, retval, slotseq, *tl;
491	time_t waituntil;
492	int i = 0, j = 0, opcnt, set_sigset = 0, slot;
493	int trycnt, error = 0, usegssname = 0, secflavour = AUTH_SYS;
494	int freeslot, maxslot, reterr, slotpos, timeo;
495	u_int16_t procnum;
496	u_int trylater_delay = 1;
497	struct nfs_feedback_arg nf;
498	struct timeval timo;
499	AUTH *auth;
500	struct rpc_callextra ext;
501	enum clnt_stat stat;
502	struct nfsreq *rep = NULL;
503	char *srv_principal = NULL, *clnt_principal = NULL;
504	sigset_t oldset;
505	struct ucred *authcred;
506	struct nfsclsession *sep;
507	uint8_t sessionid[NFSX_V4SESSIONID];
508
509	sep = dssep;
510	if (xidp != NULL)
511		*xidp = 0;
512	/* Reject requests while attempting a forced unmount. */
513	if (nmp != NULL && (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)) {
514		m_freem(nd->nd_mreq);
515		return (ESTALE);
516	}
517
518	/*
519	 * Set authcred, which is used to acquire RPC credentials, to
520	 * the cred argument by default. The crhold() should not be
521	 * necessary, but will ensure that some future code change
522	 * doesn't result in the credential being freed prematurely.
523	 */
524	authcred = crhold(cred);
525
526	/* For client side interruptible mounts, mask off the signals. */
527	if (nmp != NULL && td != NULL && NFSHASINT(nmp)) {
528		newnfs_set_sigmask(td, &oldset);
529		set_sigset = 1;
530	}
531
532	/*
533	 * XXX if not already connected call nfs_connect now. Longer
534	 * term, change nfs_mount to call nfs_connect unconditionally
535	 * and let clnt_reconnect_create handle reconnects.
536	 */
537	if (nrp->nr_client == NULL)
538		newnfs_connect(nmp, nrp, cred, td, 0);
539
540	/*
541	 * For a client side mount, nmp is != NULL and clp == NULL. For
542	 * server calls (callbacks or upcalls), nmp == NULL.
543	 */
544	if (clp != NULL) {
545		NFSLOCKSTATE();
546		if ((clp->lc_flags & LCL_GSS) && nfsrv_gsscallbackson) {
547			secflavour = RPCSEC_GSS_KRB5;
548			if (nd->nd_procnum != NFSPROC_NULL) {
549				if (clp->lc_flags & LCL_GSSINTEGRITY)
550					secflavour = RPCSEC_GSS_KRB5I;
551				else if (clp->lc_flags & LCL_GSSPRIVACY)
552					secflavour = RPCSEC_GSS_KRB5P;
553			}
554		}
555		NFSUNLOCKSTATE();
556	} else if (nmp != NULL && NFSHASKERB(nmp) &&
557	     nd->nd_procnum != NFSPROC_NULL) {
558		if (NFSHASALLGSSNAME(nmp) && nmp->nm_krbnamelen > 0)
559			nd->nd_flag |= ND_USEGSSNAME;
560		if ((nd->nd_flag & ND_USEGSSNAME) != 0) {
561			/*
562			 * If there is a client side host based credential,
563			 * use that, otherwise use the system uid, if set.
564			 * The system uid is in the nmp->nm_sockreq.nr_cred
565			 * credentials.
566			 */
567			if (nmp->nm_krbnamelen > 0) {
568				usegssname = 1;
569				clnt_principal = nmp->nm_krbname;
570			} else if (nmp->nm_uid != (uid_t)-1) {
571				KASSERT(nmp->nm_sockreq.nr_cred != NULL,
572				    ("newnfs_request: NULL nr_cred"));
573				crfree(authcred);
574				authcred = crhold(nmp->nm_sockreq.nr_cred);
575			}
576		} else if (nmp->nm_krbnamelen == 0 &&
577		    nmp->nm_uid != (uid_t)-1 && cred->cr_uid == (uid_t)0) {
578			/*
579			 * If there is no host based principal name and
580			 * the system uid is set and this is root, use the
581			 * system uid, since root won't have user
582			 * credentials in a credentials cache file.
583			 * The system uid is in the nmp->nm_sockreq.nr_cred
584			 * credentials.
585			 */
586			KASSERT(nmp->nm_sockreq.nr_cred != NULL,
587			    ("newnfs_request: NULL nr_cred"));
588			crfree(authcred);
589			authcred = crhold(nmp->nm_sockreq.nr_cred);
590		}
591		if (NFSHASINTEGRITY(nmp))
592			secflavour = RPCSEC_GSS_KRB5I;
593		else if (NFSHASPRIVACY(nmp))
594			secflavour = RPCSEC_GSS_KRB5P;
595		else
596			secflavour = RPCSEC_GSS_KRB5;
597		srv_principal = NFSMNT_SRVKRBNAME(nmp);
598	} else if (nmp != NULL && !NFSHASKERB(nmp) &&
599	    nd->nd_procnum != NFSPROC_NULL &&
600	    (nd->nd_flag & ND_USEGSSNAME) != 0) {
601		/*
602		 * For the AUTH_SYS case, use the uid that did the mount
603		 * when the RPC is doing NFSv4 system operations, as
604		 * indicated by the ND_USEGSSNAME flag.
605		 * The credentials in nm_sockreq.nr_cred were used for the
606		 * mount.
607		 */
608		KASSERT(nmp->nm_sockreq.nr_cred != NULL,
609		    ("newnfs_request: NULL nr_cred"));
610		crfree(authcred);
611		authcred = crhold(nmp->nm_sockreq.nr_cred);
612	}
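	/*
	 * At this point secflavour and authcred are set for one of three
	 * cases: a server-side callback (clp != NULL) keyed off the client's
	 * GSS flags, a Kerberized client mount (host principal, system or
	 * caller credential), or plain AUTH_SYS using the caller's or the
	 * mount's credential.
	 */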
613
614	if (nmp != NULL) {
615		bzero(&nf, sizeof(struct nfs_feedback_arg));
616		nf.nf_mount = nmp;
617		nf.nf_td = td;
618		nf.nf_lastmsg = NFSD_MONOSEC -
619		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
620	}
621
622	if (nd->nd_procnum == NFSPROC_NULL)
623		auth = authnone_create();
624	else if (usegssname) {
625		/*
626		 * For this case, the authenticator is held in the
627		 * nfssockreq structure, so don't release the reference count
628		 * held on it. --> Don't AUTH_DESTROY() it in this function.
629		 */
630		if (nrp->nr_auth == NULL)
631			nrp->nr_auth = nfs_getauth(nrp, secflavour,
632			    clnt_principal, srv_principal, NULL, authcred);
633		else
634			rpc_gss_refresh_auth_call(nrp->nr_auth);
635		auth = nrp->nr_auth;
636	} else
637		auth = nfs_getauth(nrp, secflavour, NULL,
638		    srv_principal, NULL, authcred);
639	crfree(authcred);
640	if (auth == NULL) {
641		m_freem(nd->nd_mreq);
642		if (set_sigset)
643			newnfs_restore_sigmask(td, &oldset);
644		return (EACCES);
645	}
646	bzero(&ext, sizeof(ext));
647	ext.rc_auth = auth;
648	if (nmp != NULL) {
649		ext.rc_feedback = nfs_feedback;
650		ext.rc_feedback_arg = &nf;
651	}
652
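	/*
	 * NFSv4 wraps everything except the Null procedure and callback
	 * compounds in the single Compound RPC procedure.
	 */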
653	procnum = nd->nd_procnum;
654	if ((nd->nd_flag & ND_NFSV4) &&
655	    nd->nd_procnum != NFSPROC_NULL &&
656	    nd->nd_procnum != NFSV4PROC_CBCOMPOUND)
657		procnum = NFSV4PROC_COMPOUND;
658
659	if (nmp != NULL) {
660		NFSINCRGLOBAL(newnfsstats.rpcrequests);
661
662		/* Map the procnum to the old NFSv2 one, as required. */
663		if ((nd->nd_flag & ND_NFSV2) != 0) {
664			if (nd->nd_procnum < NFS_V3NPROCS)
665				procnum = nfsv2_procid[nd->nd_procnum];
666			else
667				procnum = NFSV2PROC_NOOP;
668		}
669
670		/*
671		 * Now only used for the R_DONTRECOVER case, but until that is
672		 * supported within the krpc code, I need to keep a queue of
673		 * outstanding RPCs for nfsv4 client requests.
674		 */
675		if ((nd->nd_flag & ND_NFSV4) && procnum == NFSV4PROC_COMPOUND)
676			MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq),
677			    M_NFSDREQ, M_WAITOK);
678#ifdef KDTRACE_HOOKS
679		if (dtrace_nfscl_nfs234_start_probe != NULL) {
680			uint32_t probe_id;
681			int probe_procnum;
682
683			if (nd->nd_flag & ND_NFSV4) {
684				probe_id =
685				    nfscl_nfs4_start_probes[nd->nd_procnum];
686				probe_procnum = nd->nd_procnum;
687			} else if (nd->nd_flag & ND_NFSV3) {
688				probe_id = nfscl_nfs3_start_probes[procnum];
689				probe_procnum = procnum;
690			} else {
691				probe_id =
692				    nfscl_nfs2_start_probes[nd->nd_procnum];
693				probe_procnum = procnum;
694			}
695			if (probe_id != 0)
696				(dtrace_nfscl_nfs234_start_probe)
697				    (probe_id, vp, nd->nd_mreq, cred,
698				     probe_procnum);
699		}
700#endif
701	}
702	trycnt = 0;
703	freeslot = -1;		/* Set to slot that needs to be free'd */
704tryagain:
705	slot = -1;		/* Slot that needs a sequence# increment. */
706	/*
707	 * This timeout specifies when a new socket should be created,
708	 * along with new xid values. For UDP, this should be done
709	 * infrequently, since retransmits of RPC requests should normally
710	 * use the same xid.
711	 */
712	if (nmp == NULL) {
713		timo.tv_usec = 0;
714		if (clp == NULL)
715			timo.tv_sec = NFSV4_UPCALLTIMEO;
716		else
717			timo.tv_sec = NFSV4_CALLBACKTIMEO;
718	} else {
719		if (nrp->nr_sotype != SOCK_DGRAM) {
720			timo.tv_usec = 0;
721			if ((nmp->nm_flag & NFSMNT_NFSV4))
722				timo.tv_sec = INT_MAX;
723			else
724				timo.tv_sec = NFS_TCPTIMEO;
725		} else {
726			if (NFSHASSOFT(nmp)) {
727				/*
728				 * CLSET_RETRIES is set to 2, so this should be
729				 * half of the total timeout required.
730				 */
731				timeo = nmp->nm_retry * nmp->nm_timeo / 2;
732				if (timeo < 1)
733					timeo = 1;
734				timo.tv_sec = timeo / NFS_HZ;
735				timo.tv_usec = (timeo % NFS_HZ) * 1000000 /
736				    NFS_HZ;
737			} else {
738				/* For UDP hard mounts, use a large value. */
739				timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
740				timo.tv_usec = 0;
741			}
742		}
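		/*
		 * With CLSET_RETRIES set to 2 in newnfs_connect() for soft
		 * UDP mounts, the total time before such a mount gives up
		 * should work out to roughly nm_retry * nm_timeo ticks (two
		 * passes of the timeout computed above).
		 */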
743
744		if (rep != NULL) {
745			rep->r_flags = 0;
746			rep->r_nmp = nmp;
747			/*
748			 * Chain request into list of outstanding requests.
749			 */
750			NFSLOCKREQ();
751			TAILQ_INSERT_TAIL(&nfsd_reqq, rep, r_chain);
752			NFSUNLOCKREQ();
753		}
754	}
755
756	nd->nd_mrep = NULL;
757	if (clp != NULL && sep != NULL)
758		stat = clnt_bck_call(nrp->nr_client, &ext, procnum,
759		    nd->nd_mreq, &nd->nd_mrep, timo, sep->nfsess_xprt);
760	else
761		stat = CLNT_CALL_MBUF(nrp->nr_client, &ext, procnum,
762		    nd->nd_mreq, &nd->nd_mrep, timo);
763
764	if (rep != NULL) {
765		/*
766		 * RPC done, unlink the request.
767		 */
768		NFSLOCKREQ();
769		TAILQ_REMOVE(&nfsd_reqq, rep, r_chain);
770		NFSUNLOCKREQ();
771	}
772
773	/*
774	 * Map the RPC completion status to an errno value, counting
775	 * timeouts and invalid replies in the NFS statistics.
776	 */
777	if (stat == RPC_SUCCESS) {
778		error = 0;
779	} else if (stat == RPC_TIMEDOUT) {
780		NFSINCRGLOBAL(newnfsstats.rpctimeouts);
781		error = ETIMEDOUT;
782	} else if (stat == RPC_VERSMISMATCH) {
783		NFSINCRGLOBAL(newnfsstats.rpcinvalid);
784		error = EOPNOTSUPP;
785	} else if (stat == RPC_PROGVERSMISMATCH) {
786		NFSINCRGLOBAL(newnfsstats.rpcinvalid);
787		error = EPROTONOSUPPORT;
788	} else if (stat == RPC_INTR) {
789		error = EINTR;
790	} else {
791		NFSINCRGLOBAL(newnfsstats.rpcinvalid);
792		error = EACCES;
793	}
794	if (error) {
795		m_freem(nd->nd_mreq);
796		if (usegssname == 0)
797			AUTH_DESTROY(auth);
798		if (rep != NULL)
799			FREE((caddr_t)rep, M_NFSDREQ);
800		if (set_sigset)
801			newnfs_restore_sigmask(td, &oldset);
802		return (error);
803	}
804
805	KASSERT(nd->nd_mrep != NULL, ("mrep shouldn't be NULL if no error\n"));
806
807	/*
808	 * Search for any mbufs that are not a multiple of 4 bytes long
809	 * or with m_data not longword aligned.
810	 * These could cause pointer alignment problems, so copy them to
811	 * well aligned mbufs.
812	 */
813	newnfs_realign(&nd->nd_mrep, M_WAITOK);
814	nd->nd_md = nd->nd_mrep;
815	nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
816	nd->nd_repstat = 0;
817	if (nd->nd_procnum != NFSPROC_NULL &&
818	    nd->nd_procnum != NFSV4PROC_CBNULL) {
819		/* If sep == NULL, set it to the default in nmp. */
820		if (sep == NULL && nmp != NULL)
821			sep = nfsmnt_mdssession(nmp);
822		/*
823		 * and now the actual NFS xdr.
824		 */
825		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
826		nd->nd_repstat = fxdr_unsigned(u_int32_t, *tl);
827		if (nd->nd_repstat >= 10000)
828			NFSCL_DEBUG(1, "proc=%d reps=%d\n", (int)nd->nd_procnum,
829			    (int)nd->nd_repstat);
830
831		/*
832		 * Get rid of the tag, return count and SEQUENCE result for
833		 * NFSv4.
834		 */
835		if ((nd->nd_flag & ND_NFSV4) != 0) {
836			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
837			i = fxdr_unsigned(int, *tl);
838			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
839			if (error)
840				goto nfsmout;
841			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
842			opcnt = fxdr_unsigned(int, *tl++);
843			i = fxdr_unsigned(int, *tl++);
844			j = fxdr_unsigned(int, *tl);
845			if (j >= 10000)
846				NFSCL_DEBUG(1, "fop=%d fst=%d\n", i, j);
847			/*
848			 * If the first op is Sequence, free up the slot.
849			 */
850			if ((nmp != NULL && i == NFSV4OP_SEQUENCE && j != 0) ||
851			    (clp != NULL && i == NFSV4OP_CBSEQUENCE && j != 0))
852				NFSCL_DEBUG(1, "failed seq=%d\n", j);
853			if (((nmp != NULL && i == NFSV4OP_SEQUENCE && j == 0) ||
854			    (clp != NULL && i == NFSV4OP_CBSEQUENCE &&
855			    j == 0)) && sep != NULL) {
856				if (i == NFSV4OP_SEQUENCE)
857					NFSM_DISSECT(tl, uint32_t *,
858					    NFSX_V4SESSIONID +
859					    5 * NFSX_UNSIGNED);
860				else
861					NFSM_DISSECT(tl, uint32_t *,
862					    NFSX_V4SESSIONID +
863					    4 * NFSX_UNSIGNED);
864				mtx_lock(&sep->nfsess_mtx);
865				if (bcmp(tl, sep->nfsess_sessionid,
866				    NFSX_V4SESSIONID) == 0) {
867					tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
868					retseq = fxdr_unsigned(uint32_t, *tl++);
869					slot = fxdr_unsigned(int, *tl++);
870					freeslot = slot;
871					if (retseq != sep->nfsess_slotseq[slot])
872						printf("retseq diff 0x%x\n",
873						    retseq);
874					retval = fxdr_unsigned(uint32_t, *++tl);
875					if ((retval + 1) < sep->nfsess_foreslots
876					    )
877						sep->nfsess_foreslots = (retval
878						    + 1);
879					else if ((retval + 1) >
880					    sep->nfsess_foreslots)
881						sep->nfsess_foreslots = (retval
882						    < 64) ? (retval + 1) : 64;
883				}
884				mtx_unlock(&sep->nfsess_mtx);
885
886				/* Grab the op and status for the next one. */
887				if (opcnt > 1) {
888					NFSM_DISSECT(tl, uint32_t *,
889					    2 * NFSX_UNSIGNED);
890					i = fxdr_unsigned(int, *tl++);
891					j = fxdr_unsigned(int, *tl);
892				}
893			}
894		}
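		/*
		 * At this point i and j hold the opcode and status of the op
		 * being looked at (the one after Sequence, if the reply began
		 * with one), and slot/freeslot record any session slot that
		 * still needs its sequence number bumped or to be freed.
		 */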
895		if (nd->nd_repstat != 0) {
896			if (nd->nd_repstat == NFSERR_BADSESSION &&
897			    nmp != NULL && dssep == NULL &&
898			    (nd->nd_flag & ND_NFSV41) != 0) {
899				/*
900				 * If this is a client side MDS RPC, mark
901				 * the MDS session defunct and initiate
902				 * recovery, as required.
903				 * The nfsess_defunct field is protected by
904				 * the NFSLOCKMNT()/nm_mtx lock and not the
905				 * nfsess_mtx lock to simplify its handling,
906				 * for the MDS session. This lock is also
907				 * sufficient for nfsess_sessionid, since it
908				 * never changes in the structure.
909				 */
910				NFSCL_DEBUG(1, "Got badsession\n");
911				NFSLOCKCLSTATE();
912				NFSLOCKMNT(nmp);
913				sep = NFSMNT_MDSSESSION(nmp);
914				if (bcmp(sep->nfsess_sessionid, nd->nd_sequence,
915				    NFSX_V4SESSIONID) == 0) {
916					/* Initiate recovery. */
917					sep->nfsess_defunct = 1;
918					NFSCL_DEBUG(1, "Marked defunct\n");
919					if (nmp->nm_clp != NULL) {
920						nmp->nm_clp->nfsc_flags |=
921						    NFSCLFLAGS_RECOVER;
922						wakeup(nmp->nm_clp);
923					}
924				}
925				NFSUNLOCKCLSTATE();
926				/*
927				 * Sleep for up to 1sec waiting for a new
928				 * session.
929				 */
930				mtx_sleep(&nmp->nm_sess, &nmp->nm_mtx, PZERO,
931				    "nfsbadsess", hz);
932				/*
933				 * Get the session again, in case a new one
934				 * has been created during the sleep.
935				 */
936				sep = NFSMNT_MDSSESSION(nmp);
937				NFSUNLOCKMNT(nmp);
938				if ((nd->nd_flag & ND_LOOPBADSESS) != 0) {
939					reterr = nfsv4_sequencelookup(nmp, sep,
940					    &slotpos, &maxslot, &slotseq,
941					    sessionid);
942					if (reterr == 0) {
943						/* Fill in new session info. */
944						NFSCL_DEBUG(1,
945						  "Filling in new sequence\n");
946						tl = nd->nd_sequence;
947						bcopy(sessionid, tl,
948						    NFSX_V4SESSIONID);
949						tl += NFSX_V4SESSIONID /
950						    NFSX_UNSIGNED;
951						*tl++ = txdr_unsigned(slotseq);
952						*tl++ = txdr_unsigned(slotpos);
953						*tl = txdr_unsigned(maxslot);
954					}
955					if (reterr == NFSERR_BADSESSION ||
956					    reterr == 0) {
957						NFSCL_DEBUG(1,
958						    "Badsession looping\n");
959						m_freem(nd->nd_mrep);
960						nd->nd_mrep = NULL;
961						goto tryagain;
962					}
963					nd->nd_repstat = reterr;
964					NFSCL_DEBUG(1, "Got err=%d\n", reterr);
965				}
966			}
967			/*
968			 * When clp != NULL, it is a callback and all
969			 * callback operations can be retried for NFSERR_DELAY.
970			 */
971			if (((nd->nd_repstat == NFSERR_DELAY ||
972			      nd->nd_repstat == NFSERR_GRACE) &&
973			     (nd->nd_flag & ND_NFSV4) && (clp != NULL ||
974			     (nd->nd_procnum != NFSPROC_DELEGRETURN &&
975			     nd->nd_procnum != NFSPROC_SETATTR &&
976			     nd->nd_procnum != NFSPROC_READ &&
977			     nd->nd_procnum != NFSPROC_READDS &&
978			     nd->nd_procnum != NFSPROC_WRITE &&
979			     nd->nd_procnum != NFSPROC_WRITEDS &&
980			     nd->nd_procnum != NFSPROC_OPEN &&
981			     nd->nd_procnum != NFSPROC_CREATE &&
982			     nd->nd_procnum != NFSPROC_OPENCONFIRM &&
983			     nd->nd_procnum != NFSPROC_OPENDOWNGRADE &&
984			     nd->nd_procnum != NFSPROC_CLOSE &&
985			     nd->nd_procnum != NFSPROC_LOCK &&
986			     nd->nd_procnum != NFSPROC_LOCKU))) ||
987			    (nd->nd_repstat == NFSERR_DELAY &&
988			     (nd->nd_flag & ND_NFSV4) == 0) ||
989			    nd->nd_repstat == NFSERR_RESOURCE) {
990				if (trylater_delay > NFS_TRYLATERDEL)
991					trylater_delay = NFS_TRYLATERDEL;
992				waituntil = NFSD_MONOSEC + trylater_delay;
993				while (NFSD_MONOSEC < waituntil)
994					(void) nfs_catnap(PZERO, 0, "nfstry");
995				trylater_delay *= 2;
996				if (slot != -1) {
997					mtx_lock(&sep->nfsess_mtx);
998					sep->nfsess_slotseq[slot]++;
999					*nd->nd_slotseq = txdr_unsigned(
1000					    sep->nfsess_slotseq[slot]);
1001					mtx_unlock(&sep->nfsess_mtx);
1002				}
1003				m_freem(nd->nd_mrep);
1004				nd->nd_mrep = NULL;
1005				goto tryagain;
1006			}
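			/*
			 * The delay above doubles on each retry (1s, 2s,
			 * 4s, ...) and is capped at NFS_TRYLATERDEL seconds.
			 */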
1007
1008			/*
1009			 * If the File Handle was stale, invalidate the
1010			 * lookup cache, just in case.
1011			 * (vp != NULL implies a client side call)
1012			 */
1013			if (nd->nd_repstat == ESTALE && vp != NULL) {
1014				cache_purge(vp);
1015				if (ncl_call_invalcaches != NULL)
1016					(*ncl_call_invalcaches)(vp);
1017			}
1018		}
1019		if ((nd->nd_flag & ND_NFSV4) != 0) {
1020			/* Free the slot, as required. */
1021			if (freeslot != -1)
1022				nfsv4_freeslot(sep, freeslot);
1023			/*
1024			 * If this op is Putfh, throw its results away.
1025			 */
1026			if (j >= 10000)
1027				NFSCL_DEBUG(1, "nop=%d nst=%d\n", i, j);
1028			if (nmp != NULL && i == NFSV4OP_PUTFH && j == 0) {
1029				NFSM_DISSECT(tl,u_int32_t *,2 * NFSX_UNSIGNED);
1030				i = fxdr_unsigned(int, *tl++);
1031				j = fxdr_unsigned(int, *tl);
1032				if (j >= 10000)
1033					NFSCL_DEBUG(1, "n2op=%d n2st=%d\n", i,
1034					    j);
1035				/*
1036				 * All Compounds that do an Op that must
1037				 * be in sequence consist of NFSV4OP_PUTFH
1038				 * followed by one of these. As such, we
1039				 * can determine if the seqid# should be
1040				 * incremented, here.
1041				 */
1042				if ((i == NFSV4OP_OPEN ||
1043				     i == NFSV4OP_OPENCONFIRM ||
1044				     i == NFSV4OP_OPENDOWNGRADE ||
1045				     i == NFSV4OP_CLOSE ||
1046				     i == NFSV4OP_LOCK ||
1047				     i == NFSV4OP_LOCKU) &&
1048				    (j == 0 ||
1049				     (j != NFSERR_STALECLIENTID &&
1050				      j != NFSERR_STALESTATEID &&
1051				      j != NFSERR_BADSTATEID &&
1052				      j != NFSERR_BADSEQID &&
1053				      j != NFSERR_BADXDR &&
1054				      j != NFSERR_RESOURCE &&
1055				      j != NFSERR_NOFILEHANDLE)))
1056					nd->nd_flag |= ND_INCRSEQID;
1057			}
1058			/*
1059			 * If this op's status is non-zero, mark
1060			 * that there is no more data to process.
1061			 * The exception is Setattr, which always has xdr
1062			 * when it has failed.
1063			 */
1064			if (j != 0 && i != NFSV4OP_SETATTR)
1065				nd->nd_flag |= ND_NOMOREDATA;
1066
1067			/*
1068			 * If R_DONTRECOVER is set, replace the stale error
1069			 * reply, so that recovery isn't initiated.
1070			 */
1071			if ((nd->nd_repstat == NFSERR_STALECLIENTID ||
1072			     nd->nd_repstat == NFSERR_BADSESSION ||
1073			     nd->nd_repstat == NFSERR_STALESTATEID) &&
1074			    rep != NULL && (rep->r_flags & R_DONTRECOVER))
1075				nd->nd_repstat = NFSERR_STALEDONTRECOVER;
1076		}
1077	}
1078
1079#ifdef KDTRACE_HOOKS
1080	if (nmp != NULL && dtrace_nfscl_nfs234_done_probe != NULL) {
1081		uint32_t probe_id;
1082		int probe_procnum;
1083
1084		if (nd->nd_flag & ND_NFSV4) {
1085			probe_id = nfscl_nfs4_done_probes[nd->nd_procnum];
1086			probe_procnum = nd->nd_procnum;
1087		} else if (nd->nd_flag & ND_NFSV3) {
1088			probe_id = nfscl_nfs3_done_probes[procnum];
1089			probe_procnum = procnum;
1090		} else {
1091			probe_id = nfscl_nfs2_done_probes[nd->nd_procnum];
1092			probe_procnum = procnum;
1093		}
1094		if (probe_id != 0)
1095			(dtrace_nfscl_nfs234_done_probe)(probe_id, vp,
1096			    nd->nd_mreq, cred, probe_procnum, 0);
1097	}
1098#endif
1099
1100	m_freem(nd->nd_mreq);
1101	if (usegssname == 0)
1102		AUTH_DESTROY(auth);
1103	if (rep != NULL)
1104		FREE((caddr_t)rep, M_NFSDREQ);
1105	if (set_sigset)
1106		newnfs_restore_sigmask(td, &oldset);
1107	return (0);
1108nfsmout:
1109	mbuf_freem(nd->nd_mrep);
1110	mbuf_freem(nd->nd_mreq);
1111	if (usegssname == 0)
1112		AUTH_DESTROY(auth);
1113	if (rep != NULL)
1114		FREE((caddr_t)rep, M_NFSDREQ);
1115	if (set_sigset)
1116		newnfs_restore_sigmask(td, &oldset);
1117	return (error);
1118}
1119
1120/*
1121 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
1122 * wait for all requests to complete. This is used by forced unmounts
1123 * to terminate any outstanding RPCs.
1124 */
1125int
1126newnfs_nmcancelreqs(struct nfsmount *nmp)
1127{
1128	struct nfsclds *dsp;
1129	struct __rpc_client *cl;
1130
1131	if (nmp->nm_sockreq.nr_client != NULL)
1132		CLNT_CLOSE(nmp->nm_sockreq.nr_client);
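	/*
	 * Close the additional connections used for pNFS data servers as
	 * well.  The scan is restarted from the top each time a connection
	 * is closed, since the mount and DS locks are dropped while calling
	 * CLNT_CLOSE().
	 */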
1133lookformore:
1134	NFSLOCKMNT(nmp);
1135	TAILQ_FOREACH(dsp, &nmp->nm_sess, nfsclds_list) {
1136		NFSLOCKDS(dsp);
1137		if (dsp != TAILQ_FIRST(&nmp->nm_sess) &&
1138		    (dsp->nfsclds_flags & NFSCLDS_CLOSED) == 0 &&
1139		    dsp->nfsclds_sockp != NULL &&
1140		    dsp->nfsclds_sockp->nr_client != NULL) {
1141			dsp->nfsclds_flags |= NFSCLDS_CLOSED;
1142			cl = dsp->nfsclds_sockp->nr_client;
1143			NFSUNLOCKDS(dsp);
1144			NFSUNLOCKMNT(nmp);
1145			CLNT_CLOSE(cl);
1146			goto lookformore;
1147		}
1148		NFSUNLOCKDS(dsp);
1149	}
1150	NFSUNLOCKMNT(nmp);
1151	return (0);
1152}
1153
1154/*
1155 * Any signal that can interrupt an NFS operation in an intr mount
1156 * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
1157 */
1158int newnfs_sig_set[] = {
1159	SIGINT,
1160	SIGTERM,
1161	SIGHUP,
1162	SIGKILL,
1163	SIGQUIT
1164};
1165
1166/*
1167 * Check to see if one of the signals in our subset is pending on
1168 * the process (in an intr mount).
1169 */
1170static int
1171nfs_sig_pending(sigset_t set)
1172{
1173	int i;
1174
1175	for (i = 0 ; i < sizeof(newnfs_sig_set)/sizeof(int) ; i++)
1176		if (SIGISMEMBER(set, newnfs_sig_set[i]))
1177			return (1);
1178	return (0);
1179}
1180
1181/*
1182 * The set/restore sigmask functions are used to (temporarily) overwrite
1183 * the thread td_sigmask during an RPC call (for example). These are also
1184 * used in other places in the NFS client that might tsleep().
1185 */
1186void
1187newnfs_set_sigmask(struct thread *td, sigset_t *oldset)
1188{
1189	sigset_t newset;
1190	int i;
1191	struct proc *p;
1192
1193	SIGFILLSET(newset);
1194	if (td == NULL)
1195		td = curthread; /* XXX */
1196	p = td->td_proc;
1197	/* Remove the NFS set of signals from newset */
1198	PROC_LOCK(p);
1199	mtx_lock(&p->p_sigacts->ps_mtx);
1200	for (i = 0 ; i < sizeof(newnfs_sig_set)/sizeof(int) ; i++) {
1201		/*
1202		 * But make sure we leave the ones already masked
1203		 * by the process, i.e., remove the signal from the
1204		 * temporary signal mask only if it wasn't already
1205		 * set in td_sigmask and isn't ignored by the process.
1206		 */
1207		if (!SIGISMEMBER(td->td_sigmask, newnfs_sig_set[i]) &&
1208		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, newnfs_sig_set[i]))
1209			SIGDELSET(newset, newnfs_sig_set[i]);
1210	}
1211	mtx_unlock(&p->p_sigacts->ps_mtx);
1212	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset,
1213	    SIGPROCMASK_PROC_LOCKED);
1214	PROC_UNLOCK(p);
1215}
1216
1217void
1218newnfs_restore_sigmask(struct thread *td, sigset_t *set)
1219{
1220	if (td == NULL)
1221		td = curthread; /* XXX */
1222	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
1223}
1224
1225/*
1226 * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
1227 * old one after msleep() returns.
1228 */
1229int
1230newnfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
1231{
1232	sigset_t oldset;
1233	int error;
1234	struct proc *p;
1235
1236	if ((priority & PCATCH) == 0)
1237		return msleep(ident, mtx, priority, wmesg, timo);
1238	if (td == NULL)
1239		td = curthread; /* XXX */
1240	newnfs_set_sigmask(td, &oldset);
1241	error = msleep(ident, mtx, priority, wmesg, timo);
1242	newnfs_restore_sigmask(td, &oldset);
1243	p = td->td_proc;
1244	return (error);
1245}
1246
1247/*
1248 * Test for a termination condition pending on the process.
1249 * This is used for NFSMNT_INT mounts.
1250 */
1251int
1252newnfs_sigintr(struct nfsmount *nmp, struct thread *td)
1253{
1254	struct proc *p;
1255	sigset_t tmpset;
1256
1257	/* Terminate all requests while attempting a forced unmount. */
1258	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
1259		return (EIO);
1260	if (!(nmp->nm_flag & NFSMNT_INT))
1261		return (0);
1262	if (td == NULL)
1263		return (0);
1264	p = td->td_proc;
1265	PROC_LOCK(p);
1266	tmpset = p->p_siglist;
1267	SIGSETOR(tmpset, td->td_siglist);
1268	SIGSETNAND(tmpset, td->td_sigmask);
1269	mtx_lock(&p->p_sigacts->ps_mtx);
1270	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
1271	mtx_unlock(&p->p_sigacts->ps_mtx);
1272	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
1273	    && nfs_sig_pending(tmpset)) {
1274		PROC_UNLOCK(p);
1275		return (EINTR);
1276	}
1277	PROC_UNLOCK(p);
1278	return (0);
1279}
1280
1281static int
1282nfs_msg(struct thread *td, const char *server, const char *msg, int error)
1283{
1284	struct proc *p;
1285
1286	p = td ? td->td_proc : NULL;
1287	if (error) {
1288		tprintf(p, LOG_INFO, "newnfs server %s: %s, error %d\n",
1289		    server, msg, error);
1290	} else {
1291		tprintf(p, LOG_INFO, "newnfs server %s: %s\n", server, msg);
1292	}
1293	return (0);
1294}
1295
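/*
 * Record that the server has stopped responding (nfs_down) or has started
 * responding again (nfs_up), posting VQ_NOTRESP/VQ_NOTRESPLOCK VFS events
 * and a console message as appropriate.
 */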
1296static void
1297nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
1298    int error, int flags)
1299{
1300	if (nmp == NULL)
1301		return;
1302	mtx_lock(&nmp->nm_mtx);
1303	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
1304		nmp->nm_state |= NFSSTA_TIMEO;
1305		mtx_unlock(&nmp->nm_mtx);
1306		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1307		    VQ_NOTRESP, 0);
1308	} else
1309		mtx_unlock(&nmp->nm_mtx);
1310	mtx_lock(&nmp->nm_mtx);
1311	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
1312		nmp->nm_state |= NFSSTA_LOCKTIMEO;
1313		mtx_unlock(&nmp->nm_mtx);
1314		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1315		    VQ_NOTRESPLOCK, 0);
1316	} else
1317		mtx_unlock(&nmp->nm_mtx);
1318	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
1319}
1320
1321static void
1322nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
1323    int flags, int tprintfmsg)
1324{
1325	if (nmp == NULL)
1326		return;
1327	if (tprintfmsg) {
1328		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
1329	}
1330
1331	mtx_lock(&nmp->nm_mtx);
1332	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
1333		nmp->nm_state &= ~NFSSTA_TIMEO;
1334		mtx_unlock(&nmp->nm_mtx);
1335		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1336		    VQ_NOTRESP, 1);
1337	} else
1338		mtx_unlock(&nmp->nm_mtx);
1339
1340	mtx_lock(&nmp->nm_mtx);
1341	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
1342		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
1343		mtx_unlock(&nmp->nm_mtx);
1344		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
1345		    VQ_NOTRESPLOCK, 1);
1346	} else
1347		mtx_unlock(&nmp->nm_mtx);
1348}
1349
1350