/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched-up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that is,
 * the server side should be aware that a call is batched and not produce any
 * return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 */
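
/*
 * For illustration only: with the classic userland clnt_call() interface
 * (this kernel file uses an mbuf-based cl_call instead), a caller batches
 * by passing a NULL results routine and a zero timeout, then flushes with
 * a normal call.  A hedged sketch; LOGPROC, xdr_logrec, rec and waittime
 * are hypothetical names:
 *
 *	static const struct timeval zerotime = { 0, 0 };
 *
 *	// queued in the send buffer, not necessarily transmitted yet
 *	(void) clnt_call(clnt, LOGPROC, (xdrproc_t)xdr_logrec, &rec,
 *	    (xdrproc_t)NULL, NULL, zerotime);
 *	// a normal (non-batched) call flushes the queued ones
 *	(void) clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, waittime);
 *
 * In this file the same condition shows up in clnt_vc_call() as a zero
 * timeout, which returns RPC_TIMEDOUT without waiting for a reply.
 */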

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/syslog.h>
#include <sys/time.h>
#include <sys/uio.h>

#include <net/vnet.h>

#include <netinet/tcp.h>

#include <rpc/rpc.h>
#include <rpc/rpc_com.h>
#include <rpc/krpc.h>

struct cmessage {
        struct cmsghdr cmsg;
        struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
    rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_close(CLIENT *);
static void clnt_vc_destroy(CLIENT *);
static bool_t time_not_ok(struct timeval *);
static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);

static struct clnt_ops clnt_vc_ops = {
	.cl_call =	clnt_vc_call,
	.cl_abort =	clnt_vc_abort,
	.cl_geterr =	clnt_vc_geterr,
	.cl_freeres =	clnt_vc_freeres,
	.cl_close =	clnt_vc_close,
	.cl_destroy =	clnt_vc_destroy,
	.cl_control =	clnt_vc_control
};

static void clnt_vc_upcallsdone(struct ct_data *);

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: so is stored in a private area.
 * NB: cl->cl_auth is set to null authentication. The caller may wish to
 * set this to something more useful.
 *
 * so should be an open socket.
 */
CLIENT *
clnt_vc_create(
	struct socket *so,		/* open socket */
	struct sockaddr *raddr,		/* server's address */
	const rpcprog_t prog,		/* program number */
	const rpcvers_t vers,		/* version number */
	size_t sendsz,			/* buffer send size */
	size_t recvsz,			/* buffer recv size */
	int intrflag)			/* interruptible */
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	static uint32_t disrupt;
	struct __rpc_sockinfo si;
	XDR xdrs;
	int error, interrupted, one = 1, sleep_flag;
	struct sockopt sopt;

	if (disrupt == 0)
		disrupt = (uint32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));

	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
	ct->ct_threads = 0;
	ct->ct_closing = FALSE;
	ct->ct_closed = FALSE;
	ct->ct_upcallrefs = 0;

	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		error = soconnect(so, raddr, curthread);
		SOCK_LOCK(so);
		interrupted = 0;
		sleep_flag = PSOCK;
		if (intrflag != 0)
			sleep_flag |= PCATCH;
		while ((so->so_state & SS_ISCONNECTING)
		    && so->so_error == 0) {
			error = msleep(&so->so_timeo, SOCK_MTX(so),
			    sleep_flag, "connec", 0);
			if (error) {
				if (error == EINTR || error == ERESTART)
					interrupted = 1;
				break;
			}
		}
		if (error == 0) {
			error = so->so_error;
			so->so_error = 0;
		}
		SOCK_UNLOCK(so);
		if (error) {
			if (!interrupted)
				so->so_state &= ~SS_ISCONNECTING;
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = error;
			goto err;
		}
	}

	if (!__rpc_socket2sockinfo(so, &si)) {
		goto err;
	}

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = SOL_SOCKET;
		sopt.sopt_name = SO_KEEPALIVE;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_TCP;
		sopt.sopt_name = TCP_NODELAY;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_socket = so;
	ct->ct_wait.tv_sec = -1;
	ct->ct_wait.tv_usec = -1;
	memcpy(&ct->ct_addr, raddr, raddr->sa_len);

	/*
	 * Initialize call message
	 */
	getmicrotime(&now);
	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_xid = ct->ct_xid;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (uint32_t)prog;
	call_msg.rm_call.cb_vers = (uint32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&xdrs, &call_msg)) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&xdrs);
	XDR_DESTROY(&xdrs);
	ct->ct_waitchan = "rpcrecv";
	ct->ct_waitflag = 0;

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	error = soreserve(ct->ct_socket, sendsz, recvsz);
	if (error != 0) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
		}
		goto err;
	}
	cl->cl_refs = 1;
	cl->cl_ops = &clnt_vc_ops;
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();

	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

	ct->ct_record = NULL;
	ct->ct_record_resid = 0;
	TAILQ_INIT(&ct->ct_pending);
	return (cl);

err:
	if (ct) {
		mtx_destroy(&ct->ct_lock);
		mem_free(ct, sizeof (struct ct_data));
	}
	if (cl)
		mem_free(cl, sizeof (CLIENT));
	return ((CLIENT *)NULL);
}
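
/*
 * A hedged usage sketch (not taken from this file): a kernel consumer
 * typically creates a stream socket and hands it to clnt_vc_create(),
 * which will connect it to raddr if it is not already connected.  The
 * program/version constants and the sockaddr "sin" below are
 * illustrative assumptions only:
 *
 *	struct socket *so;
 *	CLIENT *cl = NULL;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    curthread->td_ucred, curthread);
 *	if (error == 0)
 *		cl = clnt_vc_create(so, (struct sockaddr *)&sin,
 *		    RPCPROG_EXAMPLE, RPCVERS_EXAMPLE, 0, 0, 1);
 *
 * Passing 0 for sendsz and recvsz selects the transport defaults via
 * __rpc_get_t_size(); most in-tree callers reach this function through
 * the reconnecting wrapper in clnt_rc.c rather than calling it directly.
 */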

static enum clnt_stat
clnt_vc_call(
	CLIENT		*cl,		/* client handle */
	struct rpc_callextra *ext,	/* call metadata */
	rpcproc_t	proc,		/* procedure number */
	struct mbuf	*args,		/* pointer to args */
	struct mbuf	**resultsp,	/* pointer to results */
	struct timeval	utimeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	AUTH *auth;
	struct rpc_err *errp;
	enum clnt_stat stat;
	XDR xdrs;
	struct rpc_msg reply_msg;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	uint32_t xid;
	struct mbuf *mreq = NULL, *results;
	struct ct_request *cr;
	int error;

	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closing || ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}
	ct->ct_threads++;

	if (ext) {
		auth = ext->rc_auth;
		errp = &ext->rc_err;
	} else {
		auth = cl->cl_auth;
		errp = &ct->ct_error;
	}

	cr->cr_mrep = NULL;
	cr->cr_error = 0;

	if (ct->ct_wait.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = ct->ct_wait;	/* use default timeout */
	}

call_again:
	mtx_assert(&ct->ct_lock, MA_OWNED);

	ct->ct_xid++;
	xid = ct->ct_xid;

	mtx_unlock(&ct->ct_lock);

	/*
	 * Leave space to prepend the record mark.
	 */
	mreq = m_gethdr(M_WAITOK, MT_DATA);
	mreq->m_data += sizeof(uint32_t);
	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
	    ("RPC header too big"));
	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
	mreq->m_len = ct->ct_mpos;

	/*
	 * The XID is the first thing in the request.
	 */
	*mtod(mreq, uint32_t *) = htonl(xid);

	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);

	errp->re_status = stat = RPC_SUCCESS;

	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
	    (! AUTH_MARSHALL(auth, xid, &xdrs,
		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
		errp->re_status = stat = RPC_CANTENCODEARGS;
		mtx_lock(&ct->ct_lock);
		goto out;
	}
	mreq->m_pkthdr.len = m_length(mreq, NULL);

	/*
	 * Prepend a record marker containing the packet length.
	 */
	M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK);
	*mtod(mreq, uint32_t *) =
		htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));
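	/*
	 * Worked example of the record mark set above: for a 220 byte
	 * (0xdc) RPC message the word written is
	 * htonl(0x80000000 | 0xdc) == htonl(0x800000dc).  The high bit
	 * marks this fragment as the last (here the only) fragment of
	 * the record and the low 31 bits carry the fragment length.
	 */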

	cr->cr_xid = xid;
	mtx_lock(&ct->ct_lock);
	/*
	 * Check to see if the other end has already started to close down
	 * the connection. The upcall will have set ct_error.re_status
	 * to RPC_CANTRECV if this is the case.
	 * If the other end starts to close down the connection after this
	 * point, it will be detected later when cr_error is checked,
	 * since the request is in the ct_pending queue.
	 */
	if (ct->ct_error.re_status == RPC_CANTRECV) {
		if (errp != &ct->ct_error) {
			errp->re_errno = ct->ct_error.re_errno;
			errp->re_status = RPC_CANTRECV;
		}
		stat = RPC_CANTRECV;
		goto out;
	}
	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
	mtx_unlock(&ct->ct_lock);

	/*
	 * sosend consumes mreq.
	 */
	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
	mreq = NULL;
	if (error == EMSGSIZE) {
		SOCKBUF_LOCK(&ct->ct_socket->so_snd);
		sbwait(&ct->ct_socket->so_snd);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_snd);
		AUTH_VALIDATE(auth, xid, NULL, NULL);
		mtx_lock(&ct->ct_lock);
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		goto call_again;
	}

	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
	reply_msg.acpted_rply.ar_verf.oa_length = 0;
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;

	mtx_lock(&ct->ct_lock);
	if (error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = error;
		errp->re_status = stat = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Check to see if we got an upcall while waiting for the
	 * lock. In either case (a receive error or a reply), remove
	 * the request from ct->ct_pending before handling it.
	 */
	if (cr->cr_error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = cr->cr_error;
		errp->re_status = stat = RPC_CANTRECV;
		goto out;
	}
	if (cr->cr_mrep) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		goto got_reply;
	}

	/*
	 * Hack to provide rpc-based message passing: a zero timeout
	 * means the caller does not expect a reply (e.g. a batched
	 * call), so return RPC_TIMEDOUT without waiting for one.
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_status = stat = RPC_TIMEDOUT;
		goto out;
	}

	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
	    tvtohz(&timeout));

	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);

	if (error) {
		/*
		 * The sleep returned an error, so we did not get a
		 * reply for our request. Turn the error code into an
		 * appropriate client status.
		 */
		errp->re_errno = error;
		switch (error) {
		case EINTR:
			stat = RPC_INTR;
			break;
		case EWOULDBLOCK:
			stat = RPC_TIMEDOUT;
			break;
		default:
			stat = RPC_CANTRECV;
		}
		errp->re_status = stat;
		goto out;
	} else {
		/*
		 * We were woken up by the upcall.  If the
		 * upcall had a receive error, report that,
		 * otherwise we have a reply.
		 */
		if (cr->cr_error) {
			errp->re_errno = cr->cr_error;
			errp->re_status = stat = RPC_CANTRECV;
			goto out;
		}
	}

got_reply:
	/*
	 * Now decode and validate the response. We need to drop the
	 * lock since xdr_replymsg may end up sleeping in malloc.
	 */
	mtx_unlock(&ct->ct_lock);

	if (ext && ext->rc_feedback)
		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);

	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
	ok = xdr_replymsg(&xdrs, &reply_msg);
	cr->cr_mrep = NULL;

	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
			errp->re_status = stat = RPC_SUCCESS;
		else
			stat = _seterr_reply(&reply_msg, errp);

		if (stat == RPC_SUCCESS) {
			results = xdrmbuf_getall(&xdrs);
			if (!AUTH_VALIDATE(auth, xid,
				&reply_msg.acpted_rply.ar_verf,
				&results)) {
				errp->re_status = stat = RPC_AUTHERROR;
				errp->re_why = AUTH_INVALIDRESP;
			} else {
				KASSERT(results,
				    ("auth validated but no result"));
				*resultsp = results;
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (stat == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(auth, &reply_msg)) {
				nrefreshes--;
				XDR_DESTROY(&xdrs);
				mtx_lock(&ct->ct_lock);
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		errp->re_status = stat = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&xdrs);
	mtx_lock(&ct->ct_lock);
out:
	mtx_assert(&ct->ct_lock, MA_OWNED);

	KASSERT(stat != RPC_SUCCESS || *resultsp,
	    ("RPC_SUCCESS without reply"));

	if (mreq)
		m_freem(mreq);
	if (cr->cr_mrep)
		m_freem(cr->cr_mrep);

	ct->ct_threads--;
	if (ct->ct_closing)
		wakeup(ct);

	mtx_unlock(&ct->ct_lock);

	if (auth && stat != RPC_SUCCESS)
		AUTH_VALIDATE(auth, xid, NULL, NULL);

	free(cr, M_RPC);

	return (stat);
}

static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;

	*errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	XDR xdrs;
	bool_t dummy;

	xdrs.x_op = XDR_FREE;
	dummy = (*xdr_res)(&xdrs, res_ptr);

	return (dummy);
}

/*ARGSUSED*/
static void
clnt_vc_abort(CLIENT *cl)
{
}

static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	void *infop = info;
	SVCXPRT *xprt;

	mtx_lock(&ct->ct_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			mtx_unlock(&ct->ct_lock);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
		break;
	case CLGET_SVC_ADDR:
		/*
		 * Slightly different semantics to userland - we use
		 * sockaddr instead of netbuf.
		 */
		memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	case CLGET_XID:
		*(uint32_t *)info = ct->ct_xid;
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* decrement by 1 as clnt_vc_call() increments once */
		ct->ct_xid = *(uint32_t *)info - 1;
		break;
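	/*
	 * The next four cases peek into, or patch, the pre-serialized
	 * call header stashed in ct_mcallc by clnt_vc_create().  For
	 * reference, xdr_callhdr() lays it out as one XDR unit each:
	 * xid, direction (CALL), rpcvers (2), prog, vers; hence prog
	 * lives at offset 3 * BYTES_PER_XDR_UNIT and vers at
	 * 4 * BYTES_PER_XDR_UNIT.
	 */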
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed.
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed.
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLSET_WAITCHAN:
		ct->ct_waitchan = (const char *)info;
		break;

	case CLGET_WAITCHAN:
		*(const char **) info = ct->ct_waitchan;
		break;

	case CLSET_INTERRUPTIBLE:
		if (*(int *) info)
			ct->ct_waitflag = PCATCH;
		else
			ct->ct_waitflag = 0;
		break;

	case CLGET_INTERRUPTIBLE:
		if (ct->ct_waitflag)
			*(int *) info = TRUE;
		else
			*(int *) info = FALSE;
		break;

	case CLSET_BACKCHANNEL:
		xprt = (SVCXPRT *)info;
		if (ct->ct_backchannelxprt == NULL) {
			xprt->xp_p2 = ct;
			ct->ct_backchannelxprt = xprt;
		}
		break;

	default:
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}

	mtx_unlock(&ct->ct_lock);
	return (TRUE);
}

static void
clnt_vc_close(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct ct_request *cr;

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_closing) {
		while (ct->ct_closing)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
		KASSERT(ct->ct_closed, ("client should be closed"));
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_socket) {
		ct->ct_closing = TRUE;
		mtx_unlock(&ct->ct_lock);

		SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
		soupcall_clear(ct->ct_socket, SO_RCV);
		clnt_vc_upcallsdone(ct);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

		/*
		 * Abort any pending requests and wait until everyone
		 * has finished with clnt_vc_call.
		 */
		mtx_lock(&ct->ct_lock);
		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
			cr->cr_xid = 0;
			cr->cr_error = ESHUTDOWN;
			wakeup(cr);
		}

		while (ct->ct_threads)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
	}

	ct->ct_closing = FALSE;
	ct->ct_closed = TRUE;
	mtx_unlock(&ct->ct_lock);
	wakeup(ct);
}

static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct socket *so = NULL;
	SVCXPRT *xprt;

	clnt_vc_close(cl);

	mtx_lock(&ct->ct_lock);
	xprt = ct->ct_backchannelxprt;
	ct->ct_backchannelxprt = NULL;
	if (xprt != NULL) {
		mtx_unlock(&ct->ct_lock);	/* To avoid a LOR. */
		sx_xlock(&xprt->xp_lock);
		mtx_lock(&ct->ct_lock);
		xprt->xp_p2 = NULL;
		xprt_unregister(xprt);
	}

	if (ct->ct_socket) {
		if (ct->ct_closeit) {
			so = ct->ct_socket;
		}
	}

	mtx_unlock(&ct->ct_lock);
	if (xprt != NULL) {
		sx_xunlock(&xprt->xp_lock);
		SVC_RELEASE(xprt);
	}

	mtx_destroy(&ct->ct_lock);
	if (so) {
		soshutdown(so, SHUT_WR);
		soclose(so);
	}
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
		t->tv_usec <= -1 || t->tv_usec > 1000000);
}

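/*
 * Socket receive upcall.  Called from the socket layer when data arrives,
 * with the receive sockbuf lock held.  It consumes the stream one record
 * mark at a time, reassembling each RPC record and then handing it either
 * to the request in ct_pending whose xid matches (a reply) or to the
 * backchannel transport (a callback request from the server).
 */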
int
clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct ct_data *ct = (struct ct_data *) arg;
	struct uio uio;
	struct mbuf *m, *m2;
	struct ct_request *cr;
	int error, rcvflag, foundreq;
	uint32_t xid_plus_direction[2], header;
	bool_t do_read;
	SVCXPRT *xprt;
	struct cf_conn *cd;

	CTASSERT(sizeof(xid_plus_direction) == 2 * sizeof(uint32_t));
	ct->ct_upcallrefs++;
	uio.uio_td = curthread;
	do {
		/*
		 * If ct_record_resid is zero, we are waiting for a
		 * record mark.
		 */
		if (ct->ct_record_resid == 0) {

			/*
			 * Make sure there is either a whole record
			 * mark in the buffer or there is some other
			 * error condition
			 */
			do_read = FALSE;
			if (so->so_rcv.sb_cc >= sizeof(uint32_t)
			    || (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			    || so->so_error)
				do_read = TRUE;

			if (!do_read)
				break;

			SOCKBUF_UNLOCK(&so->so_rcv);
			uio.uio_resid = sizeof(uint32_t);
			m = NULL;
			rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
			error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
			SOCKBUF_LOCK(&so->so_rcv);

			if (error == EWOULDBLOCK)
				break;

			/*
			 * If there was an error, wake up all pending
			 * requests.
			 */
			if (error || uio.uio_resid > 0) {
			wakeup_all:
				mtx_lock(&ct->ct_lock);
				if (!error) {
					/*
					 * We must have got EOF trying
					 * to read from the stream.
					 */
					error = ECONNRESET;
				}
				ct->ct_error.re_status = RPC_CANTRECV;
				ct->ct_error.re_errno = error;
				TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
					cr->cr_error = error;
					wakeup(cr);
				}
				mtx_unlock(&ct->ct_lock);
				break;
			}
			m_copydata(m, 0, sizeof(uint32_t), (char *)&header);
			header = ntohl(header);
			ct->ct_record = NULL;
			ct->ct_record_resid = header & 0x7fffffff;
			ct->ct_record_eor = ((header & 0x80000000) != 0);
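			/*
			 * If the high bit was clear, this is only an
			 * intermediate fragment; ct_record_eor stays
			 * FALSE and the loop below keeps appending
			 * fragments to ct_record until the final one
			 * (with the bit set) completes the record.
			 */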
			m_freem(m);
		} else {
			/*
			 * Wait until the socket has the whole record
			 * buffered.
			 */
			do_read = FALSE;
			if (so->so_rcv.sb_cc >= ct->ct_record_resid
			    || (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			    || so->so_error)
				do_read = TRUE;

			if (!do_read)
				break;

			/*
			 * We have the record mark. Read as much as
			 * the socket has buffered up to the end of
			 * this record.
			 */
			SOCKBUF_UNLOCK(&so->so_rcv);
			uio.uio_resid = ct->ct_record_resid;
			m = NULL;
			rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
			error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
			SOCKBUF_LOCK(&so->so_rcv);

			if (error == EWOULDBLOCK)
				break;

			if (error || uio.uio_resid == ct->ct_record_resid)
				goto wakeup_all;

			/*
			 * If we have part of the record already,
			 * chain this bit onto the end.
			 */
			if (ct->ct_record)
				m_last(ct->ct_record)->m_next = m;
			else
				ct->ct_record = m;

			ct->ct_record_resid = uio.uio_resid;

			/*
			 * If we have the entire record, see if we can
			 * match it to a request.
			 */
			if (ct->ct_record_resid == 0
			    && ct->ct_record_eor) {
				/*
				 * The XID is in the first uint32_t of
				 * the reply and the message direction
				 * is the second one.
				 */
				if (ct->ct_record->m_len <
				    sizeof(xid_plus_direction) &&
				    m_length(ct->ct_record, NULL) <
				    sizeof(xid_plus_direction)) {
					m_freem(ct->ct_record);
					break;
				}
				m_copydata(ct->ct_record, 0,
				    sizeof(xid_plus_direction),
				    (char *)xid_plus_direction);
				xid_plus_direction[0] =
				    ntohl(xid_plus_direction[0]);
				xid_plus_direction[1] =
				    ntohl(xid_plus_direction[1]);
				/* Check message direction. */
				if (xid_plus_direction[1] == CALL) {
					/* This is a backchannel request. */
					mtx_lock(&ct->ct_lock);
					xprt = ct->ct_backchannelxprt;
					if (xprt == NULL) {
						mtx_unlock(&ct->ct_lock);
						/* Just throw it away. */
						m_freem(ct->ct_record);
						ct->ct_record = NULL;
					} else {
						cd = (struct cf_conn *)
						    xprt->xp_p1;
						m2 = cd->mreq;
						/*
						 * The requests are chained
						 * in the m_nextpkt list.
						 */
						while (m2 != NULL &&
						    m2->m_nextpkt != NULL)
							/* Find end of list. */
							m2 = m2->m_nextpkt;
						if (m2 != NULL)
							m2->m_nextpkt =
							    ct->ct_record;
						else
							cd->mreq =
							    ct->ct_record;
						ct->ct_record->m_nextpkt =
						    NULL;
						ct->ct_record = NULL;
						xprt_active(xprt);
						mtx_unlock(&ct->ct_lock);
					}
				} else {
					mtx_lock(&ct->ct_lock);
					foundreq = 0;
					TAILQ_FOREACH(cr, &ct->ct_pending,
					    cr_link) {
						if (cr->cr_xid ==
						    xid_plus_direction[0]) {
							/*
							 * This one
							 * matches. We leave
							 * the reply mbuf in
							 * cr->cr_mrep. Set
							 * the XID to zero so
							 * that we will ignore
							 * any duplicated
							 * replies.
							 */
							cr->cr_xid = 0;
							cr->cr_mrep =
							    ct->ct_record;
							cr->cr_error = 0;
							foundreq = 1;
							wakeup(cr);
							break;
						}
					}
					mtx_unlock(&ct->ct_lock);

					if (!foundreq)
						m_freem(ct->ct_record);
					ct->ct_record = NULL;
				}
			}
		}
	} while (m);
	ct->ct_upcallrefs--;
	if (ct->ct_upcallrefs < 0)
		panic("rpcvc upcall refcnt");
	if (ct->ct_upcallrefs == 0)
		wakeup(&ct->ct_upcallrefs);
	return (SU_OK);
}

/*
 * Wait for all upcalls in progress to complete.
 */
static void
clnt_vc_upcallsdone(struct ct_data *ct)
{

	SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);

	while (ct->ct_upcallrefs > 0)
		(void) msleep(&ct->ct_upcallrefs,
		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
}