/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/lib/libc/rpc/clnt_vc.c 261046 2014-01-22 23:45:27Z mav $");

/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
 */
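
/*
 * Illustrative sketch only (not part of the original source; the program,
 * procedure and xdr routine names are made up): a caller batches requests
 * by passing a NULL result xdrproc and a zero timeout, then flushes the
 * pipeline with one ordinary call that does expect a reply.
 *
 *	static const struct timeval zero = { 0, 0 };
 *	struct timeval wait = { 25, 0 };
 *	int i;
 *
 *	for (i = 0; i < nreq; i++)
 *		(void)clnt_call(clnt, LOGPROC, (xdrproc_t)xdr_logrec,
 *		    &rec[i], (xdrproc_t)NULL, NULL, zero);
 *	if (clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, wait) != RPC_SUCCESS)
 *		clnt_perror(clnt, "flush");
 */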

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>

#include <arpa/inet.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>

#include <rpc/rpc.h>
#include <rpc/rpcsec_gss.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"

#define MCALL_MSG_SIZE 24

struct cmessage {
	struct cmsghdr cmsg;
	struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
    xdrproc_t, void *, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_destroy(CLIENT *);
static struct clnt_ops *clnt_vc_ops(void);
static bool_t time_not_ok(struct timeval *);
static int read_vc(void *, void *, int);
static int write_vc(void *, void *, int);
static int __msgwrite(int, void *, size_t);
static int __msgread(int, void *, size_t);

struct ct_data {
	int		ct_fd;		/* connection's fd */
	bool_t		ct_closeit;	/* close it on destroy */
	struct timeval	ct_wait;	/* wait interval in milliseconds */
	bool_t		ct_waitset;	/* wait set by clnt_control? */
	struct netbuf	ct_addr;	/* remote addr */
	struct rpc_err	ct_error;
	union {
		char	ct_mcallc[MCALL_MSG_SIZE];	/* marshalled callmsg */
		u_int32_t ct_mcalli;
	} ct_u;
	u_int		ct_mpos;	/* pos after marshal */
	XDR		ct_xdrs;	/* XDR stream */
};

/*
 *      This machinery implements per-fd locks for MT-safety.  It is not
 *      sufficient to do per-CLIENT handle locks for MT-safety because a
 *      user may create more than one CLIENT handle with the same fd behind
 *      it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
 *      by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
 *      similarly protected.  vc_fd_locks[fd] == 1 => a call is active on some
 *      CLIENT handle created for that fd.
 *      The current implementation holds locks across the entire RPC and reply.
 *      Yes, this is silly, and as soon as this code is proven to work, this
 *      should be the first thing fixed.  One step at a time.
 */
static int      *vc_fd_locks;
static cond_t   *vc_cv;
#define release_fd_lock(fd, mask) {	\
	mutex_lock(&clnt_fd_lock);	\
	vc_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
	cond_signal(&vc_cv[fd]);	\
}
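
/*
 * For reference, a condensed sketch of the acquire side that pairs with
 * release_fd_lock() above; it mirrors what clnt_vc_call(), clnt_vc_freeres()
 * and clnt_vc_control() below actually do (block signals, wait until no call
 * is active on the fd, claim it, then drop the mutex):
 *
 *	sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (vc_fd_locks[ct->ct_fd])
 *		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
 *	vc_fd_locks[ct->ct_fd] = __isthreaded ? 1 : 0;
 *	mutex_unlock(&clnt_fd_lock);
 *	...
 *	release_fd_lock(ct->ct_fd, mask);
 */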

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char clnt_read_vc_str[] = "read_vc";
static const char __no_mem_str[] = "out of memory";

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  Callers may wish
 * to set this to something more useful.
 *
 * fd should be an open socket.
 */
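/*
 * Usage sketch (illustrative only, not from the original source; MYPROG,
 * MYVERS and the address setup are assumptions): the caller typically
 * creates and connects the socket itself, wraps the server address in a
 * struct netbuf, and passes 0/0 to take the default buffer sizes.
 *
 *	struct sockaddr_in sin;		(already filled in and connect()ed)
 *	struct netbuf raddr;
 *	CLIENT *clnt;
 *
 *	raddr.buf = &sin;
 *	raddr.len = raddr.maxlen = sizeof(sin);
 *	clnt = clnt_vc_create(fd, &raddr, MYPROG, MYVERS, 0, 0);
 *	if (clnt == NULL)
 *		clnt_pcreateerror("clnt_vc_create");
 */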
CLIENT *
clnt_vc_create(fd, raddr, prog, vers, sendsz, recvsz)
	int fd;				/* open file descriptor */
	const struct netbuf *raddr;	/* server's address */
	const rpcprog_t prog;			/* program number */
	const rpcvers_t vers;			/* version number */
	u_int sendsz;			/* buffer send size */
	u_int recvsz;			/* buffer recv size */
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* client handle */
	struct timeval now;
	struct rpc_msg call_msg;
	static u_int32_t disrupt;
	sigset_t mask;
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;

	if (disrupt == 0)
		disrupt = (u_int32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
	if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
		(void) syslog(LOG_ERR, clnt_vc_errstr,
		    clnt_vc_str, __no_mem_str);
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto err;
	}
	ct->ct_addr.buf = NULL;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (vc_fd_locks == (int *) NULL) {
		int cv_allocsz, fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		vc_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (vc_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else
			memset(vc_fd_locks, '\0', fd_allocsz);

		assert(vc_cv == (cond_t *) NULL);
		cv_allocsz = dtbsize * sizeof (cond_t);
		vc_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (vc_cv == (cond_t *) NULL) {
			mem_free(vc_fd_locks, fd_allocsz);
			vc_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&vc_cv[i], 0, (void *) 0);
		}
	} else
		assert(vc_cv != (cond_t *) NULL);

	/*
	 * XXX - fvdl connecting while holding a mutex?
	 */
	slen = sizeof ss;
	if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		if (errno != ENOTCONN) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
		if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto err;

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_fd = fd;
	ct->ct_wait.tv_usec = 0;
	ct->ct_waitset = FALSE;
	ct->ct_addr.buf = malloc(raddr->maxlen);
	if (ct->ct_addr.buf == NULL)
		goto err;
	memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
	ct->ct_addr.len = raddr->len;
	ct->ct_addr.maxlen = raddr->maxlen;

	/*
	 * Initialize call message
	 */
	(void)gettimeofday(&now, NULL);
	call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (u_int32_t)prog;
	call_msg.rm_call.cb_vers = (u_int32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		if (ct->ct_closeit) {
			(void)_close(fd);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));
	assert(ct->ct_mpos + sizeof(uint32_t) <= MCALL_MSG_SIZE);

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	cl->cl_ops = clnt_vc_ops();
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
	    cl->cl_private, read_vc, write_vc);
	return (cl);

err:
	if (cl) {
		if (ct) {
			if (ct->ct_addr.len)
				mem_free(ct->ct_addr.buf, ct->ct_addr.len);
			mem_free(ct, sizeof (struct ct_data));
		}
		if (cl)
			mem_free(cl, sizeof (CLIENT));
	}
	return ((CLIENT *)NULL);
}

static enum clnt_stat
clnt_vc_call(cl, proc, xdr_args, args_ptr, xdr_results, results_ptr, timeout)
	CLIENT *cl;
	rpcproc_t proc;
	xdrproc_t xdr_args;
	void *args_ptr;
	xdrproc_t xdr_results;
	void *results_ptr;
	struct timeval timeout;
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;    /* yuk */
	bool_t shipnow;
	int refreshes = 2;
	sigset_t mask, newmask;
	int rpc_lock_value;
	bool_t reply_stat;

	assert(cl != NULL);

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (!ct->ct_waitset) {
		/* If time is not within limits, we ignore it. */
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0
	    && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	x_id = ntohl(--(*msg_x_id));

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
		    (! XDR_PUTINT32(xdrs, &proc)) ||
		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (! (*xdr_args)(xdrs, args_ptr))) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	} else {
		*(uint32_t *) &ct->ct_u.ct_mcallc[ct->ct_mpos] = htonl(proc);
		if (! __rpc_gss_wrap(cl->cl_auth, ct->ct_u.ct_mcallc,
			ct->ct_mpos + sizeof(uint32_t),
			xdrs, xdr_args, args_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return(ct->ct_error.re_status = RPC_TIMEDOUT);
	}


	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	while (TRUE) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(cl->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else {
			if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
				reply_stat = (*xdr_results)(xdrs, results_ptr);
			} else {
				reply_stat = __rpc_gss_unwrap(cl->cl_auth,
				    xdrs, xdr_results, results_ptr);
			}
			if (! reply_stat) {
				if (ct->ct_error.re_status == RPC_SUCCESS)
					ct->ct_error.re_status =
						RPC_CANTDECODERES;
			}
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	}  /* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
			goto call_again;
	}  /* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}
465
466static void
467clnt_vc_geterr(cl, errp)
468	CLIENT *cl;
469	struct rpc_err *errp;
470{
471	struct ct_data *ct;
472
473	assert(cl != NULL);
474	assert(errp != NULL);
475
476	ct = (struct ct_data *) cl->cl_private;
477	*errp = ct->ct_error;
478}
479
480static bool_t
481clnt_vc_freeres(cl, xdr_res, res_ptr)
482	CLIENT *cl;
483	xdrproc_t xdr_res;
484	void *res_ptr;
485{
486	struct ct_data *ct;
487	XDR *xdrs;
488	bool_t dummy;
489	sigset_t mask;
490	sigset_t newmask;
491
492	assert(cl != NULL);
493
494	ct = (struct ct_data *)cl->cl_private;
495	xdrs = &(ct->ct_xdrs);
496
497	sigfillset(&newmask);
498	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
499	mutex_lock(&clnt_fd_lock);
500	while (vc_fd_locks[ct->ct_fd])
501		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
502	xdrs->x_op = XDR_FREE;
503	dummy = (*xdr_res)(xdrs, res_ptr);
504	mutex_unlock(&clnt_fd_lock);
505	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
506	cond_signal(&vc_cv[ct->ct_fd]);
507
508	return dummy;
509}
510
511/*ARGSUSED*/
512static void
513clnt_vc_abort(cl)
514	CLIENT *cl;
515{
516}
517
518static bool_t
519clnt_vc_control(cl, request, info)
520	CLIENT *cl;
521	u_int request;
522	void *info;
523{
524	struct ct_data *ct;
525	void *infop = info;
526	sigset_t mask;
527	sigset_t newmask;
528	int rpc_lock_value;
529
530	assert(cl != NULL);
531
532	ct = (struct ct_data *)cl->cl_private;
533
534	sigfillset(&newmask);
535	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
536	mutex_lock(&clnt_fd_lock);
537	while (vc_fd_locks[ct->ct_fd])
538		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
539	if (__isthreaded)
540                rpc_lock_value = 1;
541        else
542                rpc_lock_value = 0;
543	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
544	mutex_unlock(&clnt_fd_lock);
545
546	switch (request) {
547	case CLSET_FD_CLOSE:
548		ct->ct_closeit = TRUE;
549		release_fd_lock(ct->ct_fd, mask);
550		return (TRUE);
551	case CLSET_FD_NCLOSE:
552		ct->ct_closeit = FALSE;
553		release_fd_lock(ct->ct_fd, mask);
554		return (TRUE);
555	default:
556		break;
557	}
558
559	/* for other requests which use info */
560	if (info == NULL) {
561		release_fd_lock(ct->ct_fd, mask);
562		return (FALSE);
563	}
564	switch (request) {
565	case CLSET_TIMEOUT:
566		if (time_not_ok((struct timeval *)info)) {
567			release_fd_lock(ct->ct_fd, mask);
568			return (FALSE);
569		}
570		ct->ct_wait = *(struct timeval *)infop;
571		ct->ct_waitset = TRUE;
572		break;
573	case CLGET_TIMEOUT:
574		*(struct timeval *)infop = ct->ct_wait;
575		break;
576	case CLGET_SERVER_ADDR:
577		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
578		break;
579	case CLGET_FD:
580		*(int *)info = ct->ct_fd;
581		break;
582	case CLGET_SVC_ADDR:
583		/* The caller should not free this memory area */
584		*(struct netbuf *)info = ct->ct_addr;
585		break;
586	case CLSET_SVC_ADDR:		/* set to new address */
587		release_fd_lock(ct->ct_fd, mask);
588		return (FALSE);
589	case CLGET_XID:
590		/*
591		 * use the knowledge that xid is the
592		 * first element in the call structure
593		 * This will get the xid of the PREVIOUS call
594		 */
595		*(u_int32_t *)info =
596		    ntohl(*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli);
597		break;
598	case CLSET_XID:
599		/* This will set the xid of the NEXT call */
600		*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli =
601		    htonl(*((u_int32_t *)info) + 1);
602		/* increment by 1 as clnt_vc_call() decrements once */
603		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(u_int32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(u_int32_t *)info);
		break;

	default:
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	return (TRUE);
}


static void
clnt_vc_destroy(cl)
	CLIENT *cl;
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	int ct_fd = ct->ct_fd;
	sigset_t mask;
	sigset_t newmask;

	assert(cl != NULL);

	ct = (struct ct_data *) cl->cl_private;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct_fd])
		cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
	if (ct->ct_closeit && ct->ct_fd != -1) {
		(void)_close(ct->ct_fd);
	}
	XDR_DESTROY(&(ct->ct_xdrs));
	if (ct->ct_addr.buf)
		free(ct->ct_addr.buf);
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_cv[ct_fd]);
}
683
684/*
685 * Interface between xdr serializer and tcp connection.
686 * Behaves like the system calls, read & write, but keeps some error state
687 * around for the rpc level.
688 */
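/*
 * Both routines follow the callback shape xdrrec_create() expects of its
 * input and output handlers:
 *
 *	int cb(void *handle, void *buf, int len);
 *
 * returning the number of bytes moved, or -1 after recording the failure
 * in ct->ct_error so it can be reported at the rpc level.
 */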
static int
read_vc(ctp, buf, len)
	void *ctp;
	void *buf;
	int len;
{
	struct sockaddr sa;
	socklen_t sal;
	struct ct_data *ct = (struct ct_data *)ctp;
	struct pollfd fd;
	int milliseconds = (int)((ct->ct_wait.tv_sec * 1000) +
	    (ct->ct_wait.tv_usec / 1000));

	if (len == 0)
		return (0);
	fd.fd = ct->ct_fd;
	fd.events = POLLIN;
	for (;;) {
		switch (_poll(&fd, 1, milliseconds)) {
		case 0:
			ct->ct_error.re_status = RPC_TIMEDOUT;
			return (-1);

		case -1:
			if (errno == EINTR)
				continue;
			ct->ct_error.re_status = RPC_CANTRECV;
			ct->ct_error.re_errno = errno;
			return (-1);
		}
		break;
	}

	sal = sizeof(sa);
	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
	    (sa.sa_family == AF_LOCAL)) {
		len = __msgread(ct->ct_fd, buf, (size_t)len);
	} else {
		len = _read(ct->ct_fd, buf, (size_t)len);
	}

	switch (len) {
	case 0:
		/* premature eof */
		ct->ct_error.re_errno = ECONNRESET;
		ct->ct_error.re_status = RPC_CANTRECV;
		len = -1;  /* it's really an error */
		break;

	case -1:
		ct->ct_error.re_errno = errno;
		ct->ct_error.re_status = RPC_CANTRECV;
		break;
	}
	return (len);
}

static int
write_vc(ctp, buf, len)
	void *ctp;
	void *buf;
	int len;
{
	struct sockaddr sa;
	socklen_t sal;
	struct ct_data *ct = (struct ct_data *)ctp;
	int i, cnt;

	sal = sizeof(sa);
	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
	    (sa.sa_family == AF_LOCAL)) {
		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
			if ((i = __msgwrite(ct->ct_fd, buf,
			     (size_t)cnt)) == -1) {
				ct->ct_error.re_errno = errno;
				ct->ct_error.re_status = RPC_CANTSEND;
				return (-1);
			}
		}
	} else {
		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
			if ((i = _write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
				ct->ct_error.re_errno = errno;
				ct->ct_error.re_status = RPC_CANTSEND;
				return (-1);
			}
		}
	}
	return (len);
}

static struct clnt_ops *
clnt_vc_ops()
{
	static struct clnt_ops ops;
	sigset_t mask, newmask;

	/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_vc_call;
		ops.cl_abort = clnt_vc_abort;
		ops.cl_geterr = clnt_vc_geterr;
		ops.cl_freeres = clnt_vc_freeres;
		ops.cl_destroy = clnt_vc_destroy;
		ops.cl_control = clnt_vc_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  A value of -1 is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(t)
	struct timeval *t;
{
	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
		t->tv_usec <= -1 || t->tv_usec > 1000000);
}

static int
__msgread(sock, buf, cnt)
	int sock;
	void *buf;
	size_t cnt;
{
	struct iovec iov[1];
	struct msghdr msg;
	union {
		struct cmsghdr cmsg;
		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cm;

	bzero((char *)&cm, sizeof(cm));
	iov[0].iov_base = buf;
	iov[0].iov_len = cnt;

	msg.msg_iov = iov;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = (caddr_t)&cm;
	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
	msg.msg_flags = 0;

	return(_recvmsg(sock, &msg, 0));
}

static int
__msgwrite(sock, buf, cnt)
	int sock;
	void *buf;
	size_t cnt;
{
	struct iovec iov[1];
	struct msghdr msg;
	union {
		struct cmsghdr cmsg;
		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cm;

	bzero((char *)&cm, sizeof(cm));
	iov[0].iov_base = buf;
	iov[0].iov_len = cnt;

	cm.cmsg.cmsg_type = SCM_CREDS;
	cm.cmsg.cmsg_level = SOL_SOCKET;
	cm.cmsg.cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));

	msg.msg_iov = iov;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = (caddr_t)&cm;
	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
	msg.msg_flags = 0;

	return(_sendmsg(sock, &msg, 0));
}