/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/rpc/svc.c 261054 2014-01-22 23:52:20Z mav $");

/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here.  The xprt routines are
 * for handling transport handles.  The svc routines handle the
 * list of service routines.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/ucred.h>

#include <rpc/rpc.h>
#include <rpc/rpcb_clnt.h>
#include <rpc/replay.h>

#include <rpc/rpc_com.h>

#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCPOOL *pool);
static void xprt_unregister_locked(SVCXPRT *xprt);
static void svc_change_space_used(SVCPOOL *pool, int delta);
static bool_t svc_request_space_available(SVCPOOL *pool);

/* ***************  SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);

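/*
 * Create an RPC service pool: allocate and initialize the SVCPOOL,
 * choose the request buffering limits and, if a sysctl tree was
 * supplied, attach the tuning and monitoring sysctl nodes to it.
 */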
SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_xlist);
	TAILQ_INIT(&pool->sp_active);
	TAILQ_INIT(&pool->sp_callouts);
	LIST_INIT(&pool->sp_threads);
	LIST_INIT(&pool->sp_idlethreads);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_threadcount = 0;

	/*
	 * Don't use more than a quarter of the mbuf clusters, or more
	 * than 45MB, for buffering pending requests.
	 */
	pool->sp_space_high = nmbclusters * MCLBYTES / 4;
	if (pool->sp_space_high > 45 << 20)
		pool->sp_space_high = 45 << 20;
	pool->sp_space_low = 2 * pool->sp_space_high / 3;

	sysctl_ctx_init(&pool->sp_sysctl);
	if (sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_minthread_sysctl, "I", "");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_maxthread_sysctl, "I", "");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLFLAG_RD, &pool->sp_threadcount, 0, "");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used, 0,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest, 0,
		    "Highest space used since reboot.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high, 0,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low, 0,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}

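/*
 * Tear down a service pool: unregister and release all transports,
 * remove any remaining callouts, free the replay cache if one was
 * attached, and finally free the pool itself.
 */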
void
svcpool_destroy(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	mtx_lock(&pool->sp_lock);

	while (TAILQ_FIRST(&pool->sp_xlist)) {
		xprt = TAILQ_FIRST(&pool->sp_xlist);
		xprt_unregister_locked(xprt);
		TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
	}

	while (TAILQ_FIRST(&pool->sp_callouts)) {
		s = TAILQ_FIRST(&pool->sp_callouts);
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);

	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}

	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}

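/*
 * Report whether the pool is currently serving requests, i.e. it has
 * been started with svc_run() and has not yet begun shutting down.
 */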
static bool_t
svcpool_active(SVCPOOL *pool)
{
	enum svcpool_state state = pool->sp_state;

	if (state == SVCPOOL_INIT || state == SVCPOOL_CLOSING)
		return (FALSE);
	return (TRUE);
}

/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, n;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newminthreads > pool->sp_minthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * increasing, create some more threads now.
			 */
			n = newminthreads - pool->sp_threadcount;
			if (n > 0) {
				mtx_unlock(&pool->sp_lock);
				while (n--)
					svc_new_thread(pool);
				mtx_lock(&pool->sp_lock);
			}
		}
		pool->sp_minthreads = newminthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	SVCTHREAD *st;
	int newmaxthreads, error;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newmaxthreads < pool->sp_maxthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * decreasing, wake up some idle threads to
			 * encourage them to exit.
			 */
			LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		pool->sp_maxthreads = newmaxthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	SVC_ACQUIRE(xprt);
	mtx_lock(&pool->sp_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&pool->sp_xlist, xprt, xp_link);
	mtx_unlock(&pool->sp_lock);
}

/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&pool->sp_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}

void
xprt_unregister(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&pool->sp_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&pool->sp_lock);

	SVC_RELEASE(xprt);
}

/*
 * Attempt to assign a service thread to this transport.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCTHREAD *st;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	st = LIST_FIRST(&pool->sp_idlethreads);
	if (st) {
		LIST_REMOVE(st, st_ilink);
		st->st_idle = FALSE;
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (pool->sp_state == SVCPOOL_ACTIVE
		    && pool->sp_lastcreatetime < time_uptime
		    && pool->sp_threadcount < pool->sp_maxthreads) {
			pool->sp_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}

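/*
 * Mark a transport as having data to service. If no thread currently
 * owns it, either hand it to an idle service thread or queue it on the
 * pool's list of active transports.
 */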
void
xprt_active(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&pool->sp_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&pool->sp_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&pool->sp_lock);
}

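/*
 * Mark a transport as idle with the pool lock already held, removing
 * it from the pool's active list if no thread is servicing it.
 */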
void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

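/*
 * Mark a transport as idle, taking and dropping the pool lock around
 * the locked variant above.
 */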
void
xprt_inactive(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&pool->sp_lock);
}

/*
 * Variant of xprt_inactive() for use only when we are sure that the
 * transport is assigned to the current thread, e.g. within receive
 * handlers.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an RPC request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* it is registering another xprt */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}

/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);
	mtx_lock(&pool->sp_lock);
	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
		if (s->sc_netid)
			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
		mem_free(s, sizeof (struct svc_callout));
	}
	mtx_unlock(&pool->sp_lock);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
			strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}

/* ******************* REPLY GENERATION ROUTINES  ************ */

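/*
 * Common reply path: record the reply in the replay cache if one is
 * configured, let the authentication flavour wrap the body, and hand
 * the message to the transport's reply method.
 */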
static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}

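/*
 * Send a reply whose results have already been encoded into an mbuf
 * chain by the caller.
 */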
bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc()
{
	SVCXPRT *xprt;
	SVCXPRT_EXT *ext;

	xprt = mem_alloc(sizeof(SVCXPRT));
	memset(xprt, 0, sizeof(SVCXPRT));
	ext = mem_alloc(sizeof(SVCXPRT_EXT));
	memset(ext, 0, sizeof(SVCXPRT_EXT));
	xprt->xp_p3 = ext;
	refcount_init(&xprt->xp_refs, 1);

	return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	mem_free(xprt, sizeof(SVCXPRT));
}

/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	enum xprt_stat stat;

	/* now receive msgs from xprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		xprt_unregister(xprt);
	}

	return (stat);
}

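/*
 * Dispatch a single parsed request to the service routine registered
 * for its program and version, or generate the appropriate error
 * reply if no match is found.
 */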
static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service */
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of rqstp to the
				 * dispatch method - it must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			}  /* found correct version */
			prog_found = TRUE;
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		}   /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	svc_freereq(rqstp);
}

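/*
 * Walk the pool's transport list and unregister any transport whose
 * idle timeout has expired. Called with the pool lock held; the lock
 * is dropped while the stale transports are released.
 */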
static void
svc_checkidle(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &pool->sp_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&pool->sp_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}
	mtx_lock(&pool->sp_lock);
}

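/*
 * Hand as many queued active transports as possible to idle service
 * threads, stopping as soon as no idle thread is available.
 */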
static void
svc_assign_waiting_sockets(SVCPOOL *pool)
{
	SVCXPRT *xprt;

	mtx_lock(&pool->sp_lock);
	while ((xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
		if (xprt_assignthread(xprt))
			TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		else
			break;
	}
	mtx_unlock(&pool->sp_lock);
}

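/*
 * Adjust the pool's count of request buffer space in use. Crossing
 * sp_space_high turns throttling on; dropping back below sp_space_low
 * turns it off again and re-assigns any transports left waiting on the
 * active list.
 */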
static void
svc_change_space_used(SVCPOOL *pool, int delta)
{
	unsigned int value;

	value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta;
	if (delta > 0) {
		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
		}
		if (value > pool->sp_space_used_highest)
			pool->sp_space_used_highest = value;
	} else {
		if (value < pool->sp_space_low && pool->sp_space_throttled) {
			pool->sp_space_throttled = FALSE;
			svc_assign_waiting_sockets(pool);
		}
	}
}

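/*
 * Return TRUE if the pool may read and queue more requests, i.e. it is
 * not currently throttled on request buffer space.
 */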
static bool_t
svc_request_space_available(SVCPOOL *pool)
{

	if (pool->sp_space_throttled)
		return (FALSE);
	return (TRUE);
}

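/*
 * Main loop for a service thread. Each thread waits for an active
 * transport, drains requests from it, hands them to svc_executereq()
 * (or to another thread chosen by the pool's sp_assign method) and
 * adjusts the thread count as load and the min/max limits dictate.
 */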
static void
svc_run_internal(SVCPOOL *pool, bool_t ismaster)
{
	struct svc_reqlist reqs;
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	size_t sz;
	int error;

	st = mem_alloc(sizeof(*st));
	st->st_pool = pool;
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");
	STAILQ_INIT(&reqs);

	mtx_lock(&pool->sp_lock);
	LIST_INSERT_HEAD(&pool->sp_threads, st, st_link);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (pool->sp_state == SVCPOOL_THREADSTARTING)
		pool->sp_state = SVCPOOL_ACTIVE;

	while (pool->sp_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (pool->sp_state == SVCPOOL_THREADWANTED) {
			pool->sp_state = SVCPOOL_THREADSTARTING;
			pool->sp_lastcreatetime = time_uptime;
			mtx_unlock(&pool->sp_lock);
			svc_new_thread(pool);
			mtx_lock(&pool->sp_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > pool->sp_lastidlecheck) {
			pool->sp_lastidlecheck = time_uptime;
			svc_checkidle(pool);
		}

		xprt = st->st_xprt;
		if (!xprt && STAILQ_EMPTY(&st->st_reqs)) {
			/*
			 * Enforce maxthreads count.
			 */
			if (pool->sp_threadcount > pool->sp_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
				TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			LIST_INSERT_HEAD(&pool->sp_idlethreads, st, st_ilink);
			st->st_idle = TRUE;
			if (ismaster || (!ismaster &&
			    pool->sp_threadcount > pool->sp_minthreads))
				error = cv_timedwait_sig(&st->st_cond,
				    &pool->sp_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &pool->sp_lock);
			if (st->st_idle) {
				LIST_REMOVE(st, st_ilink);
				st->st_idle = FALSE;
			}

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (pool->sp_threadcount
					> pool->sp_minthreads)
					&& !st->st_xprt
					&& STAILQ_EMPTY(&st->st_reqs))
					break;
			} else if (error) {
				mtx_unlock(&pool->sp_lock);
				svc_exit(pool);
				mtx_lock(&pool->sp_lock);
				break;
			}
			continue;
		}

		if (xprt) {
			/*
			 * Drain the transport socket and queue up any
			 * RPCs.
			 */
			xprt->xp_lastactive = time_uptime;
			do {
				mtx_unlock(&pool->sp_lock);
				if (!svc_request_space_available(pool))
					break;
				rqstp = NULL;
				stat = svc_getreq(xprt, &rqstp);
				if (rqstp) {
					svc_change_space_used(pool, rqstp->rq_size);
					/*
					 * See if the application has
					 * a preference for some other
					 * thread.
					 */
					stpref = st;
					if (pool->sp_assign)
						stpref = pool->sp_assign(st,
						    rqstp);
					else
						mtx_lock(&pool->sp_lock);

					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);

					/*
					 * If we assigned the request
					 * to another thread, make
					 * sure it is awake and continue
					 * reading from the
					 * socket. Otherwise, try to
					 * find some other thread to
					 * read from the socket and
					 * execute the request
					 * immediately.
					 */
					if (stpref == st)
						break;
					if (stpref->st_idle) {
						LIST_REMOVE(stpref, st_ilink);
						stpref->st_idle = FALSE;
						cv_signal(&stpref->st_cond);
					}
				} else
					mtx_lock(&pool->sp_lock);
			} while (stat == XPRT_MOREREQS
			    && pool->sp_state != SVCPOOL_CLOSING);

			/*
			 * Move this transport to the end of the
			 * active list to ensure fairness when
			 * multiple transports are active. If this was
			 * the last queued request, svc_getreq will
			 * end up calling xprt_inactive to remove from
			 * the active list.
			 */
			xprt->xp_thread = NULL;
			st->st_xprt = NULL;
			if (xprt->xp_active) {
				if (!svc_request_space_available(pool) ||
				    !xprt_assignthread(xprt))
					TAILQ_INSERT_TAIL(&pool->sp_active,
					    xprt, xp_alink);
			}
			STAILQ_CONCAT(&reqs, &st->st_reqs);
			mtx_unlock(&pool->sp_lock);
			SVC_RELEASE(xprt);
		} else {
			STAILQ_CONCAT(&reqs, &st->st_reqs);
			mtx_unlock(&pool->sp_lock);
		}

		/*
		 * Execute what we have queued.
		 */
		sz = 0;
		while ((rqstp = STAILQ_FIRST(&reqs)) != NULL) {
			STAILQ_REMOVE_HEAD(&reqs, rq_link);
			sz += rqstp->rq_size;
			svc_executereq(rqstp);
		}
		svc_change_space_used(pool, -sz);
		mtx_lock(&pool->sp_lock);
	}

	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}

	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	LIST_REMOVE(st, st_link);
	pool->sp_threadcount--;

	mtx_unlock(&pool->sp_lock);

	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	if (!ismaster)
		wakeup(pool);
}

static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCPOOL *) arg, FALSE);
	kthread_exit();
}

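/*
 * Account for and start an additional service thread for the pool.
 */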
static void
svc_new_thread(SVCPOOL *pool)
{
	struct thread *td;

	pool->sp_threadcount++;
	kthread_add(svc_thread_start, pool,
	    pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}

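/*
 * Run the pool using the calling thread as the master service thread.
 * Additional threads are started to reach sp_minthreads, and the call
 * only returns once every service thread has exited.
 */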
void
svc_run(SVCPOOL *pool)
{
	int i;
	struct proc *p;
	struct thread *td;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;
	pool->sp_lastcreatetime = time_uptime;
	pool->sp_threadcount = 1;

	for (i = 1; i < pool->sp_minthreads; i++) {
		svc_new_thread(pool);
	}

	svc_run_internal(pool, TRUE);

	mtx_lock(&pool->sp_lock);
	while (pool->sp_threadcount > 0)
		msleep(pool, &pool->sp_lock, 0, "svcexit", 0);
	mtx_unlock(&pool->sp_lock);
}

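/*
 * Request an orderly shutdown of the pool: mark it closing and wake
 * every idle service thread so it can notice and exit.
 */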
void
svc_exit(SVCPOOL *pool)
{
	SVCTHREAD *st;

	mtx_lock(&pool->sp_lock);

	if (pool->sp_state != SVCPOOL_CLOSING) {
		pool->sp_state = SVCPOOL_CLOSING;
		LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
			cv_signal(&st->st_cond);
	}

	mtx_unlock(&pool->sp_lock);
}

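/*
 * Decode the arguments of a request from its queued mbuf chain using
 * the supplied XDR routine. The request gives up ownership of the
 * mbufs to the XDR stream.
 */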
bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	struct mbuf *m;
	XDR xdrs;
	bool_t stat;

	m = rqstp->rq_args;
	rqstp->rq_args = NULL;

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	stat = xargs(&xdrs, args);
	XDR_DESTROY(&xdrs);

	return (stat);
}

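/*
 * Free the memory allocated by a previous svc_getargs() call, using
 * the same XDR routine in XDR_FREE mode; also releases the caller's
 * address if it is still attached to the request.
 */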
bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}

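/*
 * Release a request structure: notify the pool's sp_done method if one
 * is set, drop the authentication and transport references, and free
 * any remaining address and argument storage.
 */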
void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	if (st) {
		pool = st->st_pool;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}