clnt_vc.c revision 261046
1/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/
2
3/*-
4 * Copyright (c) 2009, Sun Microsystems, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 * - Redistributions of source code must retain the above copyright notice,
10 *   this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright notice,
12 *   this list of conditions and the following disclaimer in the documentation
13 *   and/or other materials provided with the distribution.
14 * - Neither the name of Sun Microsystems, Inc. nor the names of its
15 *   contributors may be used to endorse or promote products derived
16 *   from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#if defined(LIBC_SCCS) && !defined(lint)
32static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
33static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
34static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
35#endif
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: stable/10/sys/rpc/clnt_vc.c 261046 2014-01-22 23:45:27Z mav $");
38
39/*
40 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
41 *
42 * Copyright (C) 1984, Sun Microsystems, Inc.
43 *
44 * TCP based RPC supports 'batched calls'.
45 * A sequence of calls may be batched-up in a send buffer.  The rpc call
46 * returns immediately to the client even though the call was not necessarily
47 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
48 * the rpc timeout value is zero (see clnt.h, rpc).
49 *
50 * Clients should NOT casually batch calls that in fact return results; that is,
51 * the server side should be aware that a call is batched and not produce any
52 * return message.  Batched calls that produce many result messages can
53 * deadlock (netlock) the client and the server....
54 *
55 * Now go hang yourself.
56 */
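/*
 * Illustrative only: a minimal sketch of a batched call using the classic
 * userland-style clnt_call() interface referenced above (see clnt.h).
 * LOGPROC_WRITE is a hypothetical procedure number.  Batching means a NULL
 * results xdrproc_t plus a zero timeout; in this kernel implementation a
 * zero timeout likewise makes clnt_vc_call() return RPC_TIMEDOUT right
 * after queueing the send.
 */
#if 0
static void
example_batched_calls(CLIENT *clnt, char *msg)
{
	struct timeval zero = { 0, 0 };		/* batched: don't wait */
	struct timeval tv = { 25, 0 };		/* flushing call: real timeout */

	/*
	 * Batched call: no results expected, so the results xdr routine
	 * is NULL and the timeout is zero; the call returns immediately.
	 */
	(void) clnt_call(clnt, LOGPROC_WRITE, (xdrproc_t)xdr_wrapstring,
	    &msg, (xdrproc_t)NULL, NULL, zero);

	/*
	 * A normal (non-batched) call forces any batched data out onto
	 * the connection and waits for a real reply.
	 */
	(void) clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
	    (xdrproc_t)xdr_void, NULL, tv);
}
#endif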
57
58#include <sys/param.h>
59#include <sys/systm.h>
60#include <sys/lock.h>
61#include <sys/malloc.h>
62#include <sys/mbuf.h>
63#include <sys/mutex.h>
64#include <sys/pcpu.h>
65#include <sys/proc.h>
66#include <sys/protosw.h>
67#include <sys/socket.h>
68#include <sys/socketvar.h>
69#include <sys/sx.h>
70#include <sys/syslog.h>
71#include <sys/time.h>
72#include <sys/uio.h>
73
74#include <net/vnet.h>
75
76#include <netinet/tcp.h>
77
78#include <rpc/rpc.h>
79#include <rpc/rpc_com.h>
80#include <rpc/krpc.h>
81
82struct cmessage {
83        struct cmsghdr cmsg;
84        struct cmsgcred cmcred;
85};
86
87static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
88    rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
89static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
90static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
91static void clnt_vc_abort(CLIENT *);
92static bool_t clnt_vc_control(CLIENT *, u_int, void *);
93static void clnt_vc_close(CLIENT *);
94static void clnt_vc_destroy(CLIENT *);
95static bool_t time_not_ok(struct timeval *);
96static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);
97
98static struct clnt_ops clnt_vc_ops = {
99	.cl_call =	clnt_vc_call,
100	.cl_abort =	clnt_vc_abort,
101	.cl_geterr =	clnt_vc_geterr,
102	.cl_freeres =	clnt_vc_freeres,
103	.cl_close =	clnt_vc_close,
104	.cl_destroy =	clnt_vc_destroy,
105	.cl_control =	clnt_vc_control
106};
107
108static void clnt_vc_upcallsdone(struct ct_data *);
109
110/*
111 * Create a client handle for a connection.
112 * Default options are set, which the user can change using clnt_control().
113 * The rpc/vc package does buffering similar to stdio, so the client
114 * must pick send and receive buffer sizes, 0 => use the default.
115 * NB: the socket pointer is copied into a private area.
116 * NB: The rpch->cl_auth is set to null authentication. The caller may wish
117 * to set this to something more useful.
118 *
119 * so should be an open socket
120 */
121CLIENT *
122clnt_vc_create(
123	struct socket *so,		/* open socket */
124	struct sockaddr *raddr,		/* servers address */
125	const rpcprog_t prog,		/* program number */
126	const rpcvers_t vers,		/* version number */
127	size_t sendsz,			/* buffer send size */
128	size_t recvsz,			/* buffer recv size */
129	int intrflag)			/* interruptible */
130{
131	CLIENT *cl;			/* client handle */
132	struct ct_data *ct = NULL;	/* client handle */
133	struct timeval now;
134	struct rpc_msg call_msg;
135	static uint32_t disrupt;
136	struct __rpc_sockinfo si;
137	XDR xdrs;
138	int error, interrupted, one = 1, sleep_flag;
139	struct sockopt sopt;
140
141	if (disrupt == 0)
142		disrupt = (uint32_t)(long)raddr;
143
144	cl = (CLIENT *)mem_alloc(sizeof (*cl));
145	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
146
147	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
148	ct->ct_threads = 0;
149	ct->ct_closing = FALSE;
150	ct->ct_closed = FALSE;
151	ct->ct_upcallrefs = 0;
152
153	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
154		error = soconnect(so, raddr, curthread);
155		SOCK_LOCK(so);
156		interrupted = 0;
157		sleep_flag = PSOCK;
158		if (intrflag != 0)
159			sleep_flag |= PCATCH;
160		while ((so->so_state & SS_ISCONNECTING)
161		    && so->so_error == 0) {
162			error = msleep(&so->so_timeo, SOCK_MTX(so),
163			    sleep_flag, "connec", 0);
164			if (error) {
165				if (error == EINTR || error == ERESTART)
166					interrupted = 1;
167				break;
168			}
169		}
170		if (error == 0) {
171			error = so->so_error;
172			so->so_error = 0;
173		}
174		SOCK_UNLOCK(so);
175		if (error) {
176			if (!interrupted)
177				so->so_state &= ~SS_ISCONNECTING;
178			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
179			rpc_createerr.cf_error.re_errno = error;
180			goto err;
181		}
182	}
183
184	if (!__rpc_socket2sockinfo(so, &si)) {
185		goto err;
186	}
187
188	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
189		bzero(&sopt, sizeof(sopt));
190		sopt.sopt_dir = SOPT_SET;
191		sopt.sopt_level = SOL_SOCKET;
192		sopt.sopt_name = SO_KEEPALIVE;
193		sopt.sopt_val = &one;
194		sopt.sopt_valsize = sizeof(one);
195		sosetopt(so, &sopt);
196	}
197
198	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
199		bzero(&sopt, sizeof(sopt));
200		sopt.sopt_dir = SOPT_SET;
201		sopt.sopt_level = IPPROTO_TCP;
202		sopt.sopt_name = TCP_NODELAY;
203		sopt.sopt_val = &one;
204		sopt.sopt_valsize = sizeof(one);
205		sosetopt(so, &sopt);
206	}
207
208	ct->ct_closeit = FALSE;
209
210	/*
211	 * Set up private data struct
212	 */
213	ct->ct_socket = so;
214	ct->ct_wait.tv_sec = -1;
215	ct->ct_wait.tv_usec = -1;
216	memcpy(&ct->ct_addr, raddr, raddr->sa_len);
217
218	/*
219	 * Initialize call message
220	 */
221	getmicrotime(&now);
222	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
223	call_msg.rm_xid = ct->ct_xid;
224	call_msg.rm_direction = CALL;
225	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
226	call_msg.rm_call.cb_prog = (uint32_t)prog;
227	call_msg.rm_call.cb_vers = (uint32_t)vers;
228
229	/*
230	 * pre-serialize the static part of the call msg and stash it away
231	 */
232	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
233	    XDR_ENCODE);
234	if (! xdr_callhdr(&xdrs, &call_msg)) {
235		if (ct->ct_closeit) {
236			soclose(ct->ct_socket);
237		}
238		goto err;
239	}
240	ct->ct_mpos = XDR_GETPOS(&xdrs);
241	XDR_DESTROY(&xdrs);
242	ct->ct_waitchan = "rpcrecv";
243	ct->ct_waitflag = 0;
244
245	/*
246	 * Create a client handle which uses xdrrec for serialization
247	 * and authnone for authentication.
248	 */
249	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
250	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
251	error = soreserve(ct->ct_socket, sendsz, recvsz);
252	if (error != 0) {
253		if (ct->ct_closeit) {
254			soclose(ct->ct_socket);
255		}
256		goto err;
257	}
258	cl->cl_refs = 1;
259	cl->cl_ops = &clnt_vc_ops;
260	cl->cl_private = ct;
261	cl->cl_auth = authnone_create();
262
263	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
264	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
265	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
266
267	ct->ct_record = NULL;
268	ct->ct_record_resid = 0;
269	TAILQ_INIT(&ct->ct_pending);
270	return (cl);
271
272err:
273	if (cl) {
274		if (ct) {
275			mtx_destroy(&ct->ct_lock);
276			mem_free(ct, sizeof (struct ct_data));
277		}
278		if (cl)
279			mem_free(cl, sizeof (CLIENT));
280	}
281	return ((CLIENT *)NULL);
282}
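/*
 * Illustrative only: a minimal sketch of creating a TCP client handle with
 * clnt_vc_create() from kernel code.  MYPROG/MYVERS are hypothetical, error
 * handling is abbreviated, and <netinet/in.h> would be needed for struct
 * sockaddr_in.  The socket need not be connected beforehand; clnt_vc_create()
 * connects it if necessary.
 */
#if 0
static CLIENT *
example_create_tcp_client(struct sockaddr_in *sin, struct thread *td)
{
	struct socket *so;
	CLIENT *cl;
	int error;

	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error != 0)
		return (NULL);

	/* Zero send/receive sizes mean "use the transport defaults". */
	cl = clnt_vc_create(so, (struct sockaddr *)sin, MYPROG, MYVERS,
	    0, 0, 1 /* interruptible connect */);
	if (cl == NULL)
		soclose(so);	/* ct_closeit defaults to FALSE */
	return (cl);
}
#endif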
283
284static enum clnt_stat
285clnt_vc_call(
286	CLIENT		*cl,		/* client handle */
287	struct rpc_callextra *ext,	/* call metadata */
288	rpcproc_t	proc,		/* procedure number */
289	struct mbuf	*args,		/* pointer to args */
290	struct mbuf	**resultsp,	/* pointer to results */
291	struct timeval	utimeout)
292{
293	struct ct_data *ct = (struct ct_data *) cl->cl_private;
294	AUTH *auth;
295	struct rpc_err *errp;
296	enum clnt_stat stat;
297	XDR xdrs;
298	struct rpc_msg reply_msg;
299	bool_t ok;
300	int nrefreshes = 2;		/* number of times to refresh cred */
301	struct timeval timeout;
302	uint32_t xid;
303	struct mbuf *mreq = NULL, *results;
304	struct ct_request *cr;
305	int error;
306
307	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);
308
309	mtx_lock(&ct->ct_lock);
310
311	if (ct->ct_closing || ct->ct_closed) {
312		mtx_unlock(&ct->ct_lock);
313		free(cr, M_RPC);
314		return (RPC_CANTSEND);
315	}
316	ct->ct_threads++;
317
318	if (ext) {
319		auth = ext->rc_auth;
320		errp = &ext->rc_err;
321	} else {
322		auth = cl->cl_auth;
323		errp = &ct->ct_error;
324	}
325
326	cr->cr_mrep = NULL;
327	cr->cr_error = 0;
328
329	if (ct->ct_wait.tv_usec == -1) {
330		timeout = utimeout;	/* use supplied timeout */
331	} else {
332		timeout = ct->ct_wait;	/* use default timeout */
333	}
334
335call_again:
336	mtx_assert(&ct->ct_lock, MA_OWNED);
337
338	ct->ct_xid++;
339	xid = ct->ct_xid;
340
341	mtx_unlock(&ct->ct_lock);
342
343	/*
344	 * Leave space to pre-pend the record mark.
345	 */
346	mreq = m_gethdr(M_WAITOK, MT_DATA);
347	mreq->m_data += sizeof(uint32_t);
348	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
349	    ("RPC header too big"));
350	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
351	mreq->m_len = ct->ct_mpos;
352
353	/*
354	 * The XID is the first thing in the request.
355	 */
356	*mtod(mreq, uint32_t *) = htonl(xid);
357
358	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);
359
360	errp->re_status = stat = RPC_SUCCESS;
361
362	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
363	    (! AUTH_MARSHALL(auth, xid, &xdrs,
364		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
365		errp->re_status = stat = RPC_CANTENCODEARGS;
366		mtx_lock(&ct->ct_lock);
367		goto out;
368	}
369	mreq->m_pkthdr.len = m_length(mreq, NULL);
370
371	/*
372	 * Prepend a record marker containing the packet length.
373	 */
374	M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK);
375	*mtod(mreq, uint32_t *) =
376		htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));
377
378	cr->cr_xid = xid;
379	mtx_lock(&ct->ct_lock);
380	/*
381	 * Check to see if the other end has already started to close down
382	 * the connection. The upcall will have set ct_error.re_status
383	 * to RPC_CANTRECV if this is the case.
384	 * If the other end starts to close down the connection after this
385	 * point, it will be detected later when cr_error is checked,
386	 * since the request is in the ct_pending queue.
387	 */
388	if (ct->ct_error.re_status == RPC_CANTRECV) {
389		if (errp != &ct->ct_error) {
390			errp->re_errno = ct->ct_error.re_errno;
391			errp->re_status = RPC_CANTRECV;
392		}
393		stat = RPC_CANTRECV;
394		goto out;
395	}
396	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
397	mtx_unlock(&ct->ct_lock);
398
399	/*
400	 * sosend consumes mreq.
401	 */
402	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
403	mreq = NULL;
404	if (error == EMSGSIZE) {
405		SOCKBUF_LOCK(&ct->ct_socket->so_snd);
406		sbwait(&ct->ct_socket->so_snd);
407		SOCKBUF_UNLOCK(&ct->ct_socket->so_snd);
408		AUTH_VALIDATE(auth, xid, NULL, NULL);
409		mtx_lock(&ct->ct_lock);
410		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
411		goto call_again;
412	}
413
414	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
415	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
416	reply_msg.acpted_rply.ar_verf.oa_length = 0;
417	reply_msg.acpted_rply.ar_results.where = NULL;
418	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
419
420	mtx_lock(&ct->ct_lock);
421	if (error) {
422		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
423		errp->re_errno = error;
424		errp->re_status = stat = RPC_CANTSEND;
425		goto out;
426	}
427
428	/*
429	 * Check to see if we got an upcall while waiting for the
430	 * lock. In either case a reply or error has already been recorded
431	 * in cr, and the request must still be removed from ct->ct_pending.
432	 */
433	if (cr->cr_error) {
434		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
435		errp->re_errno = cr->cr_error;
436		errp->re_status = stat = RPC_CANTRECV;
437		goto out;
438	}
439	if (cr->cr_mrep) {
440		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
441		goto got_reply;
442	}
443
444	/*
445	 * Hack to provide rpc-based message passing
446	 */
447	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
448		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
449		errp->re_status = stat = RPC_TIMEDOUT;
450		goto out;
451	}
452
453	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
454	    tvtohz(&timeout));
455
456	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
457
458	if (error) {
459		/*
460		 * The sleep returned an error; the request was removed
461		 * from the list above. Turn the error code into an
462		 * appropriate client status.
463		 */
464		errp->re_errno = error;
465		switch (error) {
466		case EINTR:
467			stat = RPC_INTR;
468			break;
469		case EWOULDBLOCK:
470			stat = RPC_TIMEDOUT;
471			break;
472		default:
473			stat = RPC_CANTRECV;
474		}
475		errp->re_status = stat;
476		goto out;
477	} else {
478		/*
479		 * We were woken up by the upcall.  If the
480		 * upcall had a receive error, report that,
481		 * otherwise we have a reply.
482		 */
483		if (cr->cr_error) {
484			errp->re_errno = cr->cr_error;
485			errp->re_status = stat = RPC_CANTRECV;
486			goto out;
487		}
488	}
489
490got_reply:
491	/*
492	 * Now decode and validate the response. We need to drop the
493	 * lock since xdr_replymsg may end up sleeping in malloc.
494	 */
495	mtx_unlock(&ct->ct_lock);
496
497	if (ext && ext->rc_feedback)
498		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);
499
500	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
501	ok = xdr_replymsg(&xdrs, &reply_msg);
502	cr->cr_mrep = NULL;
503
504	if (ok) {
505		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
506		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
507			errp->re_status = stat = RPC_SUCCESS;
508		else
509			stat = _seterr_reply(&reply_msg, errp);
510
511		if (stat == RPC_SUCCESS) {
512			results = xdrmbuf_getall(&xdrs);
513			if (!AUTH_VALIDATE(auth, xid,
514				&reply_msg.acpted_rply.ar_verf,
515				&results)) {
516				errp->re_status = stat = RPC_AUTHERROR;
517				errp->re_why = AUTH_INVALIDRESP;
518			} else {
519				KASSERT(results,
520				    ("auth validated but no result"));
521				*resultsp = results;
522			}
523		}		/* end successful completion */
524		/*
525		 * If unsuccessful AND the error is an authentication error
526		 * then refresh credentials and try again, else break
527		 */
528		else if (stat == RPC_AUTHERROR)
529			/* maybe our credentials need to be refreshed ... */
530			if (nrefreshes > 0 &&
531			    AUTH_REFRESH(auth, &reply_msg)) {
532				nrefreshes--;
533				XDR_DESTROY(&xdrs);
534				mtx_lock(&ct->ct_lock);
535				goto call_again;
536			}
537		/* end of unsuccessful completion */
538	}	/* end of valid reply message */
539	else {
540		errp->re_status = stat = RPC_CANTDECODERES;
541	}
542	XDR_DESTROY(&xdrs);
543	mtx_lock(&ct->ct_lock);
544out:
545	mtx_assert(&ct->ct_lock, MA_OWNED);
546
547	KASSERT(stat != RPC_SUCCESS || *resultsp,
548	    ("RPC_SUCCESS without reply"));
549
550	if (mreq)
551		m_freem(mreq);
552	if (cr->cr_mrep)
553		m_freem(cr->cr_mrep);
554
555	ct->ct_threads--;
556	if (ct->ct_closing)
557		wakeup(ct);
558
559	mtx_unlock(&ct->ct_lock);
560
561	if (auth && stat != RPC_SUCCESS)
562		AUTH_VALIDATE(auth, xid, NULL, NULL);
563
564	free(cr, M_RPC);
565
566	return (stat);
567}
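/*
 * Illustrative only: the record-marking arithmetic used by clnt_vc_call()
 * above and decoded again in clnt_vc_soupcall() below.  RPC over a stream
 * socket frames each message with a 32-bit word whose top bit flags the
 * last fragment of a record and whose low 31 bits carry the fragment length.
 */
#if 0
static uint32_t
example_encode_record_mark(uint32_t fraglen, bool_t last_frag)
{

	/* The word travels in network byte order, hence the htonl(). */
	return (htonl((last_frag ? 0x80000000 : 0) | (fraglen & 0x7fffffff)));
}

static void
example_decode_record_mark(uint32_t wireword, uint32_t *fraglenp,
    bool_t *last_fragp)
{
	uint32_t header = ntohl(wireword);

	*fraglenp = header & 0x7fffffff;		/* ct_record_resid */
	*last_fragp = (header & 0x80000000) != 0;	/* ct_record_eor */
}
#endif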
568
569static void
570clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
571{
572	struct ct_data *ct = (struct ct_data *) cl->cl_private;
573
574	*errp = ct->ct_error;
575}
576
577static bool_t
578clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
579{
580	XDR xdrs;
581	bool_t dummy;
582
583	xdrs.x_op = XDR_FREE;
584	dummy = (*xdr_res)(&xdrs, res_ptr);
585
586	return (dummy);
587}
588
589/*ARGSUSED*/
590static void
591clnt_vc_abort(CLIENT *cl)
592{
593}
594
595static bool_t
596clnt_vc_control(CLIENT *cl, u_int request, void *info)
597{
598	struct ct_data *ct = (struct ct_data *)cl->cl_private;
599	void *infop = info;
600	SVCXPRT *xprt;
601
602	mtx_lock(&ct->ct_lock);
603
604	switch (request) {
605	case CLSET_FD_CLOSE:
606		ct->ct_closeit = TRUE;
607		mtx_unlock(&ct->ct_lock);
608		return (TRUE);
609	case CLSET_FD_NCLOSE:
610		ct->ct_closeit = FALSE;
611		mtx_unlock(&ct->ct_lock);
612		return (TRUE);
613	default:
614		break;
615	}
616
617	/* for other requests which use info */
618	if (info == NULL) {
619		mtx_unlock(&ct->ct_lock);
620		return (FALSE);
621	}
622	switch (request) {
623	case CLSET_TIMEOUT:
624		if (time_not_ok((struct timeval *)info)) {
625			mtx_unlock(&ct->ct_lock);
626			return (FALSE);
627		}
628		ct->ct_wait = *(struct timeval *)infop;
629		break;
630	case CLGET_TIMEOUT:
631		*(struct timeval *)infop = ct->ct_wait;
632		break;
633	case CLGET_SERVER_ADDR:
634		(void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
635		break;
636	case CLGET_SVC_ADDR:
637		/*
638		 * Slightly different semantics to userland - we use
639		 * sockaddr instead of netbuf.
640		 */
641		memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
642		break;
643	case CLSET_SVC_ADDR:		/* set to new address */
644		mtx_unlock(&ct->ct_lock);
645		return (FALSE);
646	case CLGET_XID:
647		*(uint32_t *)info = ct->ct_xid;
648		break;
649	case CLSET_XID:
650		/* This will set the xid of the NEXT call */
651		/* decrement by 1 as clnt_vc_call() increments once */
652		ct->ct_xid = *(uint32_t *)info - 1;
653		break;
654	case CLGET_VERS:
655		/*
656		 * This RELIES on the information that, in the call body,
657		 * the version number field is the fifth field from the
658		 * beginning of the RPC header. MUST be changed if the
659		 * call_struct is changed
660		 */
661		*(uint32_t *)info =
662		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
663		    4 * BYTES_PER_XDR_UNIT));
664		break;
665
666	case CLSET_VERS:
667		*(uint32_t *)(void *)(ct->ct_mcallc +
668		    4 * BYTES_PER_XDR_UNIT) =
669		    htonl(*(uint32_t *)info);
670		break;
671
672	case CLGET_PROG:
673		/*
674		 * This RELIES on the information that, in the call body,
675		 * the program number field is the fourth field from the
676		 * beginning of the RPC header. MUST be changed if the
677		 * call_struct is changed
678		 */
679		*(uint32_t *)info =
680		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
681		    3 * BYTES_PER_XDR_UNIT));
682		break;
683
684	case CLSET_PROG:
685		*(uint32_t *)(void *)(ct->ct_mcallc +
686		    3 * BYTES_PER_XDR_UNIT) =
687		    htonl(*(uint32_t *)info);
688		break;
689
690	case CLSET_WAITCHAN:
691		ct->ct_waitchan = (const char *)info;
692		break;
693
694	case CLGET_WAITCHAN:
695		*(const char **) info = ct->ct_waitchan;
696		break;
697
698	case CLSET_INTERRUPTIBLE:
699		if (*(int *) info)
700			ct->ct_waitflag = PCATCH;
701		else
702			ct->ct_waitflag = 0;
703		break;
704
705	case CLGET_INTERRUPTIBLE:
706		if (ct->ct_waitflag)
707			*(int *) info = TRUE;
708		else
709			*(int *) info = FALSE;
710		break;
711
712	case CLSET_BACKCHANNEL:
713		xprt = (SVCXPRT *)info;
714		if (ct->ct_backchannelxprt == NULL) {
715			xprt->xp_p2 = ct;
716			ct->ct_backchannelxprt = xprt;
717		}
718		break;
719
720	default:
721		mtx_unlock(&ct->ct_lock);
722		return (FALSE);
723	}
724
725	mtx_unlock(&ct->ct_lock);
726	return (TRUE);
727}
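/*
 * Illustrative only: a minimal sketch of driving the requests handled above
 * through the standard CLNT_CONTROL() macro from rpc/clnt.h (which simply
 * calls clnt_vc_control() for this transport).  The wait channel string is
 * stored by reference, so it must outlive the client handle.
 */
#if 0
static void
example_tune_client(CLIENT *cl)
{
	static char wchan[] = "nfsreq";		/* msleep() wait message */
	struct timeval tv = { 30, 0 };		/* default per-call timeout */
	int one = 1;

	CLNT_CONTROL(cl, CLSET_TIMEOUT, &tv);		/* sets ct_wait */
	CLNT_CONTROL(cl, CLSET_WAITCHAN, wchan);	/* sets ct_waitchan */
	CLNT_CONTROL(cl, CLSET_INTERRUPTIBLE, &one);	/* PCATCH on waits */
	CLNT_CONTROL(cl, CLSET_FD_CLOSE, NULL);		/* close socket on destroy */
}
#endif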
728
729static void
730clnt_vc_close(CLIENT *cl)
731{
732	struct ct_data *ct = (struct ct_data *) cl->cl_private;
733	struct ct_request *cr;
734
735	mtx_lock(&ct->ct_lock);
736
737	if (ct->ct_closed) {
738		mtx_unlock(&ct->ct_lock);
739		return;
740	}
741
742	if (ct->ct_closing) {
743		while (ct->ct_closing)
744			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
745		KASSERT(ct->ct_closed, ("client should be closed"));
746		mtx_unlock(&ct->ct_lock);
747		return;
748	}
749
750	if (ct->ct_socket) {
751		ct->ct_closing = TRUE;
752		mtx_unlock(&ct->ct_lock);
753
754		SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
755		soupcall_clear(ct->ct_socket, SO_RCV);
756		clnt_vc_upcallsdone(ct);
757		SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
758
759		/*
760		 * Abort any pending requests and wait until everyone
761		 * has finished with clnt_vc_call.
762		 */
763		mtx_lock(&ct->ct_lock);
764		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
765			cr->cr_xid = 0;
766			cr->cr_error = ESHUTDOWN;
767			wakeup(cr);
768		}
769
770		while (ct->ct_threads)
771			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
772	}
773
774	ct->ct_closing = FALSE;
775	ct->ct_closed = TRUE;
776	mtx_unlock(&ct->ct_lock);
777	wakeup(ct);
778}
779
780static void
781clnt_vc_destroy(CLIENT *cl)
782{
783	struct ct_data *ct = (struct ct_data *) cl->cl_private;
784	struct socket *so = NULL;
785	SVCXPRT *xprt;
786
787	clnt_vc_close(cl);
788
789	mtx_lock(&ct->ct_lock);
790	xprt = ct->ct_backchannelxprt;
791	ct->ct_backchannelxprt = NULL;
792	if (xprt != NULL) {
793		mtx_unlock(&ct->ct_lock);	/* To avoid a LOR. */
794		sx_xlock(&xprt->xp_lock);
795		mtx_lock(&ct->ct_lock);
796		xprt->xp_p2 = NULL;
797		xprt_unregister(xprt);
798	}
799
800	if (ct->ct_socket) {
801		if (ct->ct_closeit) {
802			so = ct->ct_socket;
803		}
804	}
805
806	mtx_unlock(&ct->ct_lock);
807	if (xprt != NULL) {
808		sx_xunlock(&xprt->xp_lock);
809		SVC_RELEASE(xprt);
810	}
811
812	mtx_destroy(&ct->ct_lock);
813	if (so) {
814		soshutdown(so, SHUT_WR);
815		soclose(so);
816	}
817	mem_free(ct, sizeof(struct ct_data));
818	if (cl->cl_netid && cl->cl_netid[0])
819		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
820	if (cl->cl_tp && cl->cl_tp[0])
821		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
822	mem_free(cl, sizeof(CLIENT));
823}
824
825/*
826 * Make sure that the time is not garbage.  A value of -1 is disallowed.
827 * Note this is different from time_not_ok in clnt_dg.c
828 */
829static bool_t
830time_not_ok(struct timeval *t)
831{
832	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
833		t->tv_usec <= -1 || t->tv_usec > 1000000);
834}
835
836int
837clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
838{
839	struct ct_data *ct = (struct ct_data *) arg;
840	struct uio uio;
841	struct mbuf *m, *m2;
842	struct ct_request *cr;
843	int error, rcvflag, foundreq;
844	uint32_t xid_plus_direction[2], header;
845	bool_t do_read;
846	SVCXPRT *xprt;
847	struct cf_conn *cd;
848
849	CTASSERT(sizeof(xid_plus_direction) == 2 * sizeof(uint32_t));
850	ct->ct_upcallrefs++;
851	uio.uio_td = curthread;
852	do {
853		/*
854		 * If ct_record_resid is zero, we are waiting for a
855		 * record mark.
856		 */
857		if (ct->ct_record_resid == 0) {
858
859			/*
860			 * Make sure there is either a whole record
861			 * mark in the buffer or there is some other
862			 * error condition
863			 */
864			do_read = FALSE;
865			if (so->so_rcv.sb_cc >= sizeof(uint32_t)
866			    || (so->so_rcv.sb_state & SBS_CANTRCVMORE)
867			    || so->so_error)
868				do_read = TRUE;
869
870			if (!do_read)
871				break;
872
873			SOCKBUF_UNLOCK(&so->so_rcv);
874			uio.uio_resid = sizeof(uint32_t);
875			m = NULL;
876			rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
877			error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
878			SOCKBUF_LOCK(&so->so_rcv);
879
880			if (error == EWOULDBLOCK)
881				break;
882
883			/*
884			 * If there was an error, wake up all pending
885			 * requests.
886			 */
887			if (error || uio.uio_resid > 0) {
888			wakeup_all:
889				mtx_lock(&ct->ct_lock);
890				if (!error) {
891					/*
892					 * We must have got EOF trying
893					 * to read from the stream.
894					 */
895					error = ECONNRESET;
896				}
897				ct->ct_error.re_status = RPC_CANTRECV;
898				ct->ct_error.re_errno = error;
899				TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
900					cr->cr_error = error;
901					wakeup(cr);
902				}
903				mtx_unlock(&ct->ct_lock);
904				break;
905			}
906			m_copydata(m, 0, sizeof(uint32_t), (char *)&header);
907			header = ntohl(header);
908			ct->ct_record = NULL;
909			ct->ct_record_resid = header & 0x7fffffff;
910			ct->ct_record_eor = ((header & 0x80000000) != 0);
911			m_freem(m);
912		} else {
913			/*
914			 * Wait until the socket has the whole record
915			 * buffered.
916			 */
917			do_read = FALSE;
918			if (so->so_rcv.sb_cc >= ct->ct_record_resid
919			    || (so->so_rcv.sb_state & SBS_CANTRCVMORE)
920			    || so->so_error)
921				do_read = TRUE;
922
923			if (!do_read)
924				break;
925
926			/*
927			 * We have the record mark. Read as much as
928			 * the socket has buffered up to the end of
929			 * this record.
930			 */
931			SOCKBUF_UNLOCK(&so->so_rcv);
932			uio.uio_resid = ct->ct_record_resid;
933			m = NULL;
934			rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
935			error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
936			SOCKBUF_LOCK(&so->so_rcv);
937
938			if (error == EWOULDBLOCK)
939				break;
940
941			if (error || uio.uio_resid == ct->ct_record_resid)
942				goto wakeup_all;
943
944			/*
945			 * If we have part of the record already,
946			 * chain this bit onto the end.
947			 */
948			if (ct->ct_record)
949				m_last(ct->ct_record)->m_next = m;
950			else
951				ct->ct_record = m;
952
953			ct->ct_record_resid = uio.uio_resid;
954
955			/*
956			 * If we have the entire record, see if we can
957			 * match it to a request.
958			 */
959			if (ct->ct_record_resid == 0
960			    && ct->ct_record_eor) {
961				/*
962				 * The XID is in the first uint32_t of
963				 * the reply and the message direction
964				 * is the second one.
965				 */
966				if (ct->ct_record->m_len <
967				    sizeof(xid_plus_direction) &&
968				    m_length(ct->ct_record, NULL) <
969				    sizeof(xid_plus_direction)) {
970					m_freem(ct->ct_record);
971					break;
972				}
973				m_copydata(ct->ct_record, 0,
974				    sizeof(xid_plus_direction),
975				    (char *)xid_plus_direction);
976				xid_plus_direction[0] =
977				    ntohl(xid_plus_direction[0]);
978				xid_plus_direction[1] =
979				    ntohl(xid_plus_direction[1]);
980				/* Check message direction. */
981				if (xid_plus_direction[1] == CALL) {
982					/* This is a backchannel request. */
983					mtx_lock(&ct->ct_lock);
984					xprt = ct->ct_backchannelxprt;
985					if (xprt == NULL) {
986						mtx_unlock(&ct->ct_lock);
987						/* Just throw it away. */
988						m_freem(ct->ct_record);
989						ct->ct_record = NULL;
990					} else {
991						cd = (struct cf_conn *)
992						    xprt->xp_p1;
993						m2 = cd->mreq;
994						/*
995						 * The requests are chained
996						 * in the m_nextpkt list.
997						 */
998						while (m2 != NULL &&
999						    m2->m_nextpkt != NULL)
1000							/* Find end of list. */
1001							m2 = m2->m_nextpkt;
1002						if (m2 != NULL)
1003							m2->m_nextpkt =
1004							    ct->ct_record;
1005						else
1006							cd->mreq =
1007							    ct->ct_record;
1008						ct->ct_record->m_nextpkt =
1009						    NULL;
1010						ct->ct_record = NULL;
1011						xprt_active(xprt);
1012						mtx_unlock(&ct->ct_lock);
1013					}
1014				} else {
1015					mtx_lock(&ct->ct_lock);
1016					foundreq = 0;
1017					TAILQ_FOREACH(cr, &ct->ct_pending,
1018					    cr_link) {
1019						if (cr->cr_xid ==
1020						    xid_plus_direction[0]) {
1021							/*
1022							 * This one
1023							 * matches. We leave
1024							 * the reply mbuf in
1025							 * cr->cr_mrep. Set
1026							 * the XID to zero so
1027							 * that we will ignore
1028							 * any duplicated
1029							 * replies.
1030							 */
1031							cr->cr_xid = 0;
1032							cr->cr_mrep =
1033							    ct->ct_record;
1034							cr->cr_error = 0;
1035							foundreq = 1;
1036							wakeup(cr);
1037							break;
1038						}
1039					}
1040					mtx_unlock(&ct->ct_lock);
1041
1042					if (!foundreq)
1043						m_freem(ct->ct_record);
1044					ct->ct_record = NULL;
1045				}
1046			}
1047		}
1048	} while (m);
1049	ct->ct_upcallrefs--;
1050	if (ct->ct_upcallrefs < 0)
1051		panic("rpcvc upcall refcnt");
1052	if (ct->ct_upcallrefs == 0)
1053		wakeup(&ct->ct_upcallrefs);
1054	return (SU_OK);
1055}
1056
1057/*
1058 * Wait for all upcalls in progress to complete.
1059 */
1060static void
1061clnt_vc_upcallsdone(struct ct_data *ct)
1062{
1063
1064	SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);
1065
1066	while (ct->ct_upcallrefs > 0)
1067		(void) msleep(&ct->ct_upcallrefs,
1068		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
1069}
1070