/*	$NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

#if defined(LIBC_SCCS) && !defined(lint)
#ident	"@(#)clnt_dg.c	1.23	94/04/22 SMI"
static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/lib/libc/rpc/clnt_dg.c 309489 2016-12-03 17:40:58Z ngie $");

/*
 * Implements a connectionless client side RPC.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <rpc/rpc.h>
#include <rpc/rpcsec_gss.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <err.h>
#include "un-namespace.h"
#include "rpc_com.h"
#include "mt_misc.h"


#ifdef _FREEFALL_CONFIG
/*
 * Disable RPC exponential back-off for FreeBSD.org systems.
 */
#define	RPC_MAX_BACKOFF		1 /* second */
#else
#define	RPC_MAX_BACKOFF		30 /* seconds */
#endif


static struct clnt_ops *clnt_dg_ops(void);
static bool_t time_not_ok(struct timeval *);
static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
	    xdrproc_t, void *, struct timeval);
static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_dg_abort(CLIENT *);
static bool_t clnt_dg_control(CLIENT *, u_int, void *);
static void clnt_dg_destroy(CLIENT *);


/*
 *	This machinery implements per-fd locks for MT-safety.  It is not
 *	sufficient to do per-CLIENT handle locks for MT-safety because a
 *	user may create more than one CLIENT handle with the same fd behind
 *	it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
 *	by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
 *	similarly protected.  dg_fd_locks[fd] == 1 => a call is active on some
 *	CLIENT handle created for that fd.
 *	The current implementation holds locks across the entire RPC and reply,
 *	including retransmissions.  Yes, this is silly, and as soon as this
 *	code is proven to work, this should be the first thing fixed.  One step
 *	at a time.
 */
static int	*dg_fd_locks;
static cond_t	*dg_cv;
#define	release_fd_lock(fd, mask) {		\
	mutex_lock(&clnt_fd_lock);	\
	dg_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL); \
	cond_signal(&dg_cv[fd]);	\
}
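
/*
 * For reference, a minimal sketch of the acquire side that pairs with
 * release_fd_lock() above.  It mirrors the pattern used by clnt_dg_call()
 * and clnt_dg_control() below and is illustrative only:
 *
 *	sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (dg_fd_locks[fd])
 *		cond_wait(&dg_cv[fd], &clnt_fd_lock);
 *	dg_fd_locks[fd] = __isthreaded ? 1 : 0;
 *	mutex_unlock(&clnt_fd_lock);
 *	... issue the RPC ...
 *	release_fd_lock(fd, mask);
 */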

static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";

/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */

#define	MCALL_MSG_SIZE 24

/*
 * Private data kept per client handle
 */
struct cu_data {
	int			cu_fd;		/* connection's fd */
	bool_t			cu_closeit;	/* opened by library */
	struct sockaddr_storage	cu_raddr;	/* remote address */
	int			cu_rlen;
	struct timeval		cu_wait;	/* retransmit interval */
	struct timeval		cu_total;	/* total time for the call */
	struct rpc_err		cu_error;
	XDR			cu_outxdrs;
	u_int			cu_xdrpos;
	u_int			cu_sendsz;	/* send size */
	char			cu_outhdr[MCALL_MSG_SIZE];
	char			*cu_outbuf;
	u_int			cu_recvsz;	/* recv size */
	int			cu_async;
	int			cu_connect;	/* Use connect(). */
	int			cu_connected;	/* Have done connect(). */
	struct kevent		cu_kin;
	int			cu_kq;
	char			cu_inbuf[1];
};

/*
 * Connectionless client creation returns a client handle with the
 * parameters below.  Default options are set, which the user can
 * change using clnt_control().  fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 * 	The caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received. Normally they are the same, but they can be
 * changed to improve the program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 *
 * fd      - open file descriptor
 * svcaddr - server's address
 * program - program number
 * version - version number
 * sendsz  - buffer send size
 * recvsz  - buffer recv size
 */
CLIENT *
clnt_dg_create(int fd, const struct netbuf *svcaddr, rpcprog_t program,
    rpcvers_t version, u_int sendsz, u_int recvsz)
{
	CLIENT *cl = NULL;		/* client handle */
	struct cu_data *cu = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	sigset_t mask;
	sigset_t newmask;
	struct __rpc_sockinfo si;
	int one = 1;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (dg_fd_locks == (int *) NULL) {
		int cv_allocsz;
		size_t fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (dg_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else
			memset(dg_fd_locks, '\0', fd_allocsz);

		cv_allocsz = dtbsize * sizeof (cond_t);
		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (dg_cv == (cond_t *) NULL) {
			mem_free(dg_fd_locks, fd_allocsz);
			dg_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err1;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&dg_cv[i], 0, (void *) 0);
		}
	}

	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
	}

	if (!__rpc_fd2sockinfo(fd, &si)) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	if ((sendsz == 0) || (recvsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		return (NULL);
	}

	if ((cl = mem_alloc(sizeof (CLIENT))) == NULL)
		goto err1;
	/*
	 * Should be multiple of 4 for XDR.
	 */
	sendsz = ((sendsz + 3) / 4) * 4;
	recvsz = ((recvsz + 3) / 4) * 4;
	cu = mem_alloc(sizeof (*cu) + sendsz + recvsz);
	if (cu == NULL)
		goto err1;
	(void) memcpy(&cu->cu_raddr, svcaddr->buf, (size_t)svcaddr->len);
	cu->cu_rlen = svcaddr->len;
	cu->cu_outbuf = &cu->cu_inbuf[recvsz];
	/* Other values can also be set through clnt_control() */
	cu->cu_wait.tv_sec = 15;	/* heuristically chosen */
	cu->cu_wait.tv_usec = 0;
	cu->cu_total.tv_sec = -1;
	cu->cu_total.tv_usec = -1;
	cu->cu_sendsz = sendsz;
	cu->cu_recvsz = recvsz;
	cu->cu_async = FALSE;
	cu->cu_connect = FALSE;
	cu->cu_connected = FALSE;
	(void) gettimeofday(&now, NULL);
	call_msg.rm_xid = __RPC_GETXID(&now);
	call_msg.rm_call.cb_prog = program;
	call_msg.rm_call.cb_vers = version;
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outhdr, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&cu->cu_outxdrs, &call_msg)) {
		rpc_createerr.cf_stat = RPC_CANTENCODEARGS;  /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		goto err2;
	}
	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));
	XDR_DESTROY(&cu->cu_outxdrs);
	xdrmem_create(&cu->cu_outxdrs, cu->cu_outbuf, sendsz, XDR_ENCODE);

	/* XXX fvdl - do we still want this? */
#if 0
	(void)bindresvport_sa(fd, (struct sockaddr *)svcaddr->buf);
#endif
	_ioctl(fd, FIONBIO, (char *)(void *)&one);

	/*
	 * By default, closeit is always FALSE. It is the user's
	 * responsibility to close the fd, or the user may use
	 * clnt_control() to let clnt_destroy() do it.
	 */
	cu->cu_closeit = FALSE;
	cu->cu_fd = fd;
	cl->cl_ops = clnt_dg_ops();
	cl->cl_private = (caddr_t)(void *)cu;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	cu->cu_kq = -1;
	EV_SET(&cu->cu_kin, cu->cu_fd, EVFILT_READ, EV_ADD, 0, 0, 0);
	return (cl);
err1:
	warnx(mem_err_clnt_dg);
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
err2:
	if (cl) {
		mem_free(cl, sizeof (CLIENT));
		if (cu)
			mem_free(cu, sizeof (*cu) + sendsz + recvsz);
	}
	return (NULL);
}
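
/*
 * Example usage (a minimal sketch only, not compiled into the library).
 * The loopback address and the program/version numbers below (100000/4,
 * i.e. rpcbind) are purely illustrative, and error handling is abbreviated:
 *
 *	struct sockaddr_in sin;
 *	struct netbuf svcaddr;
 *	CLIENT *cl;
 *	int fd;
 *
 *	fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	(void)bindresvport(fd, NULL);
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(111);
 *	sin.sin_addr.s_addr = inet_addr("127.0.0.1");
 *	svcaddr.buf = &sin;
 *	svcaddr.len = svcaddr.maxlen = sizeof(sin);
 *	cl = clnt_dg_create(fd, &svcaddr, 100000, 4, 0, 0);
 *	if (cl == NULL)
 *		clnt_pcreateerror("clnt_dg_create");
 */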

/*
 * cl       - client handle
 * proc     - procedure number
 * xargs    - xdr routine for args
 * argsp    - pointer to args
 * xresults - xdr routine for results
 * resultsp - pointer to results
 * utimeout - time to wait before giving up
 */
static enum clnt_stat
clnt_dg_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xargs, void *argsp,
    xdrproc_t xresults, void *resultsp, struct timeval utimeout)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs;
	size_t outlen = 0;
	struct rpc_msg reply_msg;
	XDR reply_xdrs;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	int nretries = 0;		/* number of times we retransmitted */
	struct timeval timeout;
	struct timeval retransmit_time;
	struct timeval next_sendtime, starttime, time_waited, tv;
	struct timespec ts;
	struct kevent kv;
	struct sockaddr *sa;
	sigset_t mask;
	sigset_t newmask;
	socklen_t salen;
	ssize_t recvlen = 0;
	int kin_len, n, rpc_lock_value;
	u_int32_t xid;

	outlen = 0;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (cu->cu_total.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = cu->cu_total;	/* use default timeout */
	}

	if (cu->cu_connect && !cu->cu_connected) {
		if (_connect(cu->cu_fd, (struct sockaddr *)&cu->cu_raddr,
		    cu->cu_rlen) < 0) {
			cu->cu_error.re_errno = errno;
			cu->cu_error.re_status = RPC_CANTSEND;
			goto out;
		}
		cu->cu_connected = 1;
	}
	if (cu->cu_connected) {
		sa = NULL;
		salen = 0;
	} else {
		sa = (struct sockaddr *)&cu->cu_raddr;
		salen = cu->cu_rlen;
	}
	time_waited.tv_sec = 0;
	time_waited.tv_usec = 0;
	retransmit_time = next_sendtime = cu->cu_wait;
	gettimeofday(&starttime, NULL);

	/* Clean up in case the last call ended in a longjmp(3) call. */
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	if ((cu->cu_kq = kqueue()) < 0) {
		cu->cu_error.re_errno = errno;
		cu->cu_error.re_status = RPC_CANTSEND;
		goto out;
	}
	kin_len = 1;

call_again:
	if (cu->cu_async == TRUE && xargs == NULL)
		goto get_reply;
	/*
	 * The transaction id (xid) is the first thing in the out buffer.
	 * XXX It is in network byte order, so we must be careful when
	 * we increment it.
	 */
	xid = ntohl(*(u_int32_t *)(void *)(cu->cu_outhdr));
	xid++;
	*(u_int32_t *)(void *)(cu->cu_outhdr) = htonl(xid);
call_again_same_xid:
	xdrs = &(cu->cu_outxdrs);
	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		if ((! XDR_PUTBYTES(xdrs, cu->cu_outhdr, cu->cu_xdrpos)) ||
		    (! XDR_PUTINT32(xdrs, &proc)) ||
		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (! (*xargs)(xdrs, argsp))) {
			cu->cu_error.re_status = RPC_CANTENCODEARGS;
			goto out;
		}
	} else {
		*(uint32_t *) &cu->cu_outhdr[cu->cu_xdrpos] = htonl(proc);
		if (!__rpc_gss_wrap(cl->cl_auth, cu->cu_outhdr,
			cu->cu_xdrpos + sizeof(uint32_t),
			xdrs, xargs, argsp)) {
			cu->cu_error.re_status = RPC_CANTENCODEARGS;
			goto out;
		}
	}
	outlen = (size_t)XDR_GETPOS(xdrs);

send_again:
	if (_sendto(cu->cu_fd, cu->cu_outbuf, outlen, 0, sa, salen) != outlen) {
		cu->cu_error.re_errno = errno;
		cu->cu_error.re_status = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		cu->cu_error.re_status = RPC_TIMEDOUT;
		goto out;
	}

get_reply:

	/*
	 * sub-optimal code appears here because we have
	 * some clock time to spare while the packets are in flight.
	 * (We assume that this is actually only executed once.)
	 */
	reply_msg.acpted_rply.ar_verf = _null_auth;
	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		reply_msg.acpted_rply.ar_results.where = resultsp;
		reply_msg.acpted_rply.ar_results.proc = xresults;
	} else {
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
	}

	for (;;) {
		/* Decide how long to wait. */
		if (timercmp(&next_sendtime, &timeout, <))
			timersub(&next_sendtime, &time_waited, &tv);
		else
			timersub(&timeout, &time_waited, &tv);
		if (tv.tv_sec < 0 || tv.tv_usec < 0)
			tv.tv_sec = tv.tv_usec = 0;
		TIMEVAL_TO_TIMESPEC(&tv, &ts);

		n = _kevent(cu->cu_kq, &cu->cu_kin, kin_len, &kv, 1, &ts);
		/* We don't need to register the event again. */
		kin_len = 0;

		if (n == 1) {
			if (kv.flags & EV_ERROR) {
				cu->cu_error.re_errno = kv.data;
				cu->cu_error.re_status = RPC_CANTRECV;
				goto out;
			}
			/* We have some data now */
			do {
				recvlen = _recvfrom(cu->cu_fd, cu->cu_inbuf,
				    cu->cu_recvsz, 0, NULL, NULL);
			} while (recvlen < 0 && errno == EINTR);
			if (recvlen < 0 && errno != EWOULDBLOCK) {
				cu->cu_error.re_errno = errno;
				cu->cu_error.re_status = RPC_CANTRECV;
				goto out;
			}
			if (recvlen >= sizeof(u_int32_t) &&
			    (cu->cu_async == TRUE ||
			    *((u_int32_t *)(void *)(cu->cu_inbuf)) ==
			    *((u_int32_t *)(void *)(cu->cu_outbuf)))) {
				/* We now assume we have the proper reply. */
				break;
			}
		}
		if (n == -1 && errno != EINTR) {
			cu->cu_error.re_errno = errno;
			cu->cu_error.re_status = RPC_CANTRECV;
			goto out;
		}
		gettimeofday(&tv, NULL);
		timersub(&tv, &starttime, &time_waited);

		/* Check for timeout. */
		if (timercmp(&time_waited, &timeout, >)) {
			cu->cu_error.re_status = RPC_TIMEDOUT;
			goto out;
		}

		/* Retransmit if necessary. */
		if (timercmp(&time_waited, &next_sendtime, >)) {
			/* update retransmit_time */
			if (retransmit_time.tv_sec < RPC_MAX_BACKOFF)
				timeradd(&retransmit_time, &retransmit_time,
				    &retransmit_time);
			timeradd(&next_sendtime, &retransmit_time,
			    &next_sendtime);
			nretries++;

			/*
			 * When retransmitting an RPCSEC_GSS message,
			 * we must use a new sequence number (handled
			 * by __rpc_gss_wrap above).
			 */
			if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS)
				goto send_again;
			else
				goto call_again_same_xid;
		}
	}

	/*
	 * now decode and validate the response
	 */

	xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int)recvlen, XDR_DECODE);
	ok = xdr_replymsg(&reply_xdrs, &reply_msg);
	/* XDR_DESTROY(&reply_xdrs);	save a few cycles on noop destroy */
	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
			(reply_msg.acpted_rply.ar_stat == SUCCESS))
			cu->cu_error.re_status = RPC_SUCCESS;
		else
			_seterr_reply(&reply_msg, &(cu->cu_error));

		if (cu->cu_error.re_status == RPC_SUCCESS) {
			if (! AUTH_VALIDATE(cl->cl_auth,
					    &reply_msg.acpted_rply.ar_verf)) {
				if (nretries &&
				    cl->cl_auth->ah_cred.oa_flavor
				    == RPCSEC_GSS)
					/*
					 * If we retransmitted, it's
					 * possible that we will
					 * receive a reply for one of
					 * the earlier transmissions
					 * (which will use an older
					 * RPCSEC_GSS sequence
					 * number). In this case, just
					 * go back and listen for a
					 * new reply. We could keep a
					 * record of all the seq
					 * numbers we have transmitted
					 * so far so that we could
					 * accept a reply for any of
					 * them here.
					 */
					goto get_reply;
				cu->cu_error.re_status = RPC_AUTHERROR;
				cu->cu_error.re_why = AUTH_INVALIDRESP;
			} else {
				if (cl->cl_auth->ah_cred.oa_flavor
				    == RPCSEC_GSS) {
					if (!__rpc_gss_unwrap(cl->cl_auth,
						&reply_xdrs, xresults,
						resultsp))
						cu->cu_error.re_status =
							RPC_CANTDECODERES;
				}
			}
			if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
				xdrs->x_op = XDR_FREE;
				(void) xdr_opaque_auth(xdrs,
					&(reply_msg.acpted_rply.ar_verf));
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (cu->cu_error.re_status == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
				nrefreshes--;
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		cu->cu_error.re_status = RPC_CANTDECODERES;
	}
out:
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	cu->cu_kq = -1;
	release_fd_lock(cu->cu_fd, mask);
	return (cu->cu_error.re_status);
}
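
/*
 * Example call (a sketch only; "cl" is a handle from clnt_dg_create()
 * above and the timeout values are arbitrary).  Note that once a total
 * timeout has been set with CLSET_TIMEOUT, the timeout argument passed
 * to clnt_call() is ignored in favor of that value:
 *
 *	struct timeval total = { 25, 0 };
 *	struct timeval retry = { 5, 0 };
 *	enum clnt_stat stat;
 *
 *	(void)clnt_control(cl, CLSET_TIMEOUT, (char *)&total);
 *	(void)clnt_control(cl, CLSET_RETRY_TIMEOUT, (char *)&retry);
 *	stat = clnt_call(cl, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, total);
 *	if (stat != RPC_SUCCESS)
 *		clnt_perror(cl, "NULLPROC call failed");
 */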

static void
clnt_dg_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;

	*errp = cu->cu_error;
}

static bool_t
clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	XDR *xdrs = &(cu->cu_outxdrs);
	bool_t dummy;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu->cu_fd]);
	return (dummy);
}

/*ARGSUSED*/
static void
clnt_dg_abort(CLIENT *h)
{
}

static bool_t
clnt_dg_control(CLIENT *cl, u_int request, void *info)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	struct netbuf *addr;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu->cu_fd])
		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	switch (request) {
	case CLSET_FD_CLOSE:
		cu->cu_closeit = TRUE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		cu->cu_closeit = FALSE;
		release_fd_lock(cu->cu_fd, mask);
		return (TRUE);
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_total = *(struct timeval *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)info = cu->cu_total;
		break;
	case CLGET_SERVER_ADDR:		/* Give the caller the fd address */
		/* Now obsolete. Only for backward compatibility */
		(void) memcpy(info, &cu->cu_raddr, (size_t)cu->cu_rlen);
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		cu->cu_wait = *(struct timeval *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)info = cu->cu_wait;
		break;
	case CLGET_FD:
		*(int *)info = cu->cu_fd;
		break;
	case CLGET_SVC_ADDR:
		addr = (struct netbuf *)info;
		addr->buf = &cu->cu_raddr;
		addr->len = cu->cu_rlen;
		addr->maxlen = sizeof cu->cu_raddr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		addr = (struct netbuf *)info;
		if (addr->len < sizeof cu->cu_raddr) {
			release_fd_lock(cu->cu_fd, mask);
			return (FALSE);
		}
		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
		cu->cu_rlen = addr->len;
		break;
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)cu->cu_outhdr);
		break;

	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)cu->cu_outhdr =
		    htonl(*(u_int32_t *)info - 1);
		/* decrement by 1 as clnt_dg_call() increments once */
		break;

	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outhdr +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(cu->cu_outhdr + 4 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(cu->cu_outhdr +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(cu->cu_outhdr + 3 * BYTES_PER_XDR_UNIT)
			= htonl(*(u_int32_t *)info);
		break;
	case CLSET_ASYNC:
		cu->cu_async = *(int *)info;
		break;
	case CLSET_CONNECT:
		cu->cu_connect = *(int *)info;
		break;
	default:
		release_fd_lock(cu->cu_fd, mask);
		return (FALSE);
	}
	release_fd_lock(cu->cu_fd, mask);
	return (TRUE);
}
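
/*
 * Example clnt_control() usage (a sketch only; "cl" is an existing handle
 * and the version number shown is hypothetical).  To read the xid of the
 * PREVIOUS call and then switch the handle to another program version for
 * subsequent calls:
 *
 *	u_int32_t xid, vers;
 *
 *	(void)clnt_control(cl, CLGET_XID, (char *)&xid);
 *	vers = 3;
 *	(void)clnt_control(cl, CLSET_VERS, (char *)&vers);
 */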

static void
clnt_dg_destroy(CLIENT *cl)
{
	struct cu_data *cu = (struct cu_data *)cl->cl_private;
	int cu_fd = cu->cu_fd;
	sigset_t mask;
	sigset_t newmask;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (dg_fd_locks[cu_fd])
		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
	if (cu->cu_closeit)
		(void)_close(cu_fd);
	if (cu->cu_kq >= 0)
		_close(cu->cu_kq);
	XDR_DESTROY(&(cu->cu_outxdrs));
	mem_free(cu, (sizeof (*cu) + cu->cu_sendsz + cu->cu_recvsz));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof (CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	cond_signal(&dg_cv[cu_fd]);
}

static struct clnt_ops *
clnt_dg_ops(void)
{
	static struct clnt_ops ops;
	sigset_t mask;
	sigset_t newmask;

/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_dg_call;
		ops.cl_abort = clnt_dg_abort;
		ops.cl_geterr = clnt_dg_geterr;
		ops.cl_freeres = clnt_dg_freeres;
		ops.cl_destroy = clnt_dg_destroy;
		ops.cl_control = clnt_dg_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is allowed.
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
		t->tv_usec < -1 || t->tv_usec > 1000000);
}