/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/rpc/svc.c 267741 2014-06-22 18:02:39Z mav $");

/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here.  The xprt routines are
 * for handling transport handles.  The svc routines handle the
 * list of service routines.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 */
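
/*
 * Usage sketch (hedged): a kernel RPC service creates a pool, wraps a
 * socket in a transport, registers a dispatch routine and donates the
 * calling thread to the pool.  The transport constructor shown and the
 * MYPROG/MYVERS/mysvc_dispatch names are illustrative assumptions, not
 * definitions from this file:
 *
 *	SVCPOOL *pool = svcpool_create("mysvc", NULL);
 *	SVCXPRT *xprt = svc_vc_create(pool, so, 0, 0);
 *
 *	if (!svc_reg(xprt, MYPROG, MYVERS, mysvc_dispatch, nconf))
 *		return (ENXIO);
 *	svc_run(pool);		(returns only after svc_exit(pool))
 *	svcpool_destroy(pool);
 */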

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/ucred.h>

#include <rpc/rpc.h>
#include <rpc/rpcb_clnt.h>
#include <rpc/replay.h>

#include <rpc/rpc_com.h>

#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCPOOL *pool);
static void xprt_unregister_locked(SVCXPRT *xprt);
static void svc_change_space_used(SVCPOOL *pool, int delta);
static bool_t svc_request_space_available(SVCPOOL *pool);

/* ***************  SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);

SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_xlist);
	TAILQ_INIT(&pool->sp_active);
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	LIST_INIT(&pool->sp_threads);
	LIST_INIT(&pool->sp_idlethreads);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_threadcount = 0;
	/*
	 * Don't use more than a quarter of mbuf clusters, or more than
	 * 45MB, for buffering requests.
	 */
	pool->sp_space_high = nmbclusters * MCLBYTES / 4;
	if (pool->sp_space_high > 45 << 20)
		pool->sp_space_high = 45 << 20;
	pool->sp_space_low = 2 * pool->sp_space_high / 3;

	sysctl_ctx_init(&pool->sp_sysctl);
	if (sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_minthread_sysctl, "I", "");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_maxthread_sysctl, "I", "");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLFLAG_RD, &pool->sp_threadcount, 0, "");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used, 0,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest, 0,
		    "Highest space used since reboot.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high, 0,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low, 0,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return (pool);
}
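
/*
 * Worked example of the sizing above (numbers are illustrative): with
 * nmbclusters = 65536 and MCLBYTES = 2048, a quarter of total cluster
 * space is 32MB, below the 45MB cap, so sp_space_high = 32MB and
 * sp_space_low = 2/3 of that, about 21MB.  Once parsed but not yet
 * executed requests hold more than sp_space_high bytes, receiving is
 * throttled until usage drops back below sp_space_low.
 */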

void
svcpool_destroy(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	mtx_lock(&pool->sp_lock);

	while (TAILQ_FIRST(&pool->sp_xlist)) {
		xprt = TAILQ_FIRST(&pool->sp_xlist);
		xprt_unregister_locked(xprt);
		TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
	}

	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);

	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}

	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}

static bool_t
svcpool_active(SVCPOOL *pool)
{
	enum svcpool_state state = pool->sp_state;

	if (state == SVCPOOL_INIT || state == SVCPOOL_CLOSING)
		return (FALSE);
	return (TRUE);
}

/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, n;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newminthreads > pool->sp_minthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * increasing, create some more threads now.
			 */
			n = newminthreads - pool->sp_threadcount;
			if (n > 0) {
				mtx_unlock(&pool->sp_lock);
				while (n--)
					svc_new_thread(pool);
				mtx_lock(&pool->sp_lock);
			}
		}
		pool->sp_minthreads = newminthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	SVCTHREAD *st;
	int newmaxthreads, error;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newmaxthreads < pool->sp_maxthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * decreasing, wake up some idle threads to
			 * encourage them to exit.
			 */
			LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		pool->sp_maxthreads = newmaxthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}
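
/*
 * Example (hedged): the OID path of these knobs depends on the
 * sysctl_base the consumer passed to svcpool_create(); the NFS server,
 * for instance, attaches its pool so that they appear as
 * vfs.nfsd.minthreads and vfs.nfsd.maxthreads and can be tuned at
 * runtime:
 *
 *	sysctl vfs.nfsd.maxthreads=64
 */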

/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	SVC_ACQUIRE(xprt);
	mtx_lock(&pool->sp_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&pool->sp_xlist, xprt, xp_link);
	mtx_unlock(&pool->sp_lock);
}

/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&pool->sp_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}

void
xprt_unregister(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&pool->sp_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&pool->sp_lock);

	SVC_RELEASE(xprt);
}

/*
 * Attempt to assign a service thread to this transport.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCTHREAD *st;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	st = LIST_FIRST(&pool->sp_idlethreads);
	if (st) {
		LIST_REMOVE(st, st_ilink);
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (pool->sp_state == SVCPOOL_ACTIVE
		    && pool->sp_lastcreatetime < time_uptime
		    && pool->sp_threadcount < pool->sp_maxthreads) {
			pool->sp_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}

void
xprt_active(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&pool->sp_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&pool->sp_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&pool->sp_lock);
}

void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

void
xprt_inactive(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&pool->sp_lock);
}

/*
 * Variant of xprt_inactive() for use only when it is certain that the
 * port is assigned to a thread, for example within receive handlers.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an RPC request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* the caller is registering another xprt */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}
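
/*
 * Example registration (a hedged sketch; MYPROG, MYVERS, nconf and
 * mysvc_dispatch are hypothetical, not defined in this file):
 *
 *	static void mysvc_dispatch(struct svc_req *rqstp, SVCXPRT *xprt);
 *
 *	if (!svc_reg(xprt, MYPROG, MYVERS, mysvc_dispatch, nconf)) {
 *		printf("mysvc: svc_reg failed\n");
 *		return (ENXIO);
 *	}
 */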

/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);
	mtx_lock(&pool->sp_lock);
	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
		if (s->sc_netid)
			mem_free(s->sc_netid, strlen(s->sc_netid) + 1);
		mem_free(s, sizeof (struct svc_callout));
	}
	mtx_unlock(&pool->sp_lock);
}

/*
 * Add a service connection loss program to the callout list.
 * The dispatch routine will be called when some port in this pool dies.
 */
bool_t
svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch)
			break;
	}
	if (s != NULL) {
		mtx_unlock(&pool->sp_lock);
		return (TRUE);
	}
	s = malloc(sizeof (struct svc_loss_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s->slc_dispatch = dispatch;
	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
	mtx_unlock(&pool->sp_lock);
	return (TRUE);
}
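
/*
 * Example (hedged; the handler and its cleanup action are
 * hypothetical): a connection-oriented service that keeps per-transport
 * state can arrange for it to be torn down when a transport dies:
 *
 *	static void
 *	mysvc_lost(SVCXPRT *xprt)
 *	{
 *		mysvc_forget_connection(xprt);
 *	}
 *
 *	(void) svc_loss_reg(xprt, mysvc_lost);
 */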

/*
 * Remove a service connection loss program from the callout list.
 */
void
svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
{
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch) {
			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
			free(s, M_RPC);
			break;
		}
	}
	mtx_unlock(&pool->sp_lock);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
			strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}

/* ******************* REPLY GENERATION ROUTINES  ************ */

static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void *xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}

bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc(void)
{
	SVCXPRT *xprt;
	SVCXPRT_EXT *ext;

	xprt = mem_alloc(sizeof(SVCXPRT));
	memset(xprt, 0, sizeof(SVCXPRT));
	ext = mem_alloc(sizeof(SVCXPRT_EXT));
	memset(ext, 0, sizeof(SVCXPRT_EXT));
	xprt->xp_p3 = ext;
	refcount_init(&xprt->xp_refs, 1);

	return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	mem_free(xprt, sizeof(SVCXPRT));
}
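
/*
 * Sketch of the expected life cycle from a transport backend's point
 * of view (hedged; the ops table name and the exact xp_destroy
 * behaviour of a given backend are assumptions):
 *
 *	xprt = svc_xprt_alloc();
 *	xprt->xp_pool = pool;
 *	xprt->xp_ops = &mysvc_ops;
 *	...
 *	xprt_register(xprt);	(the pool takes its own reference)
 *	...
 *	SVC_RELEASE(xprt);	(the last release invokes the backend's
 *				 xp_destroy method, which typically ends
 *				 with svc_xprt_free())
 */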

/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprt (supports batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}

static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service */
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of rqstp to the
				 * dispatch method - it must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			}  /* found correct version */
			prog_found = TRUE;
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		}   /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	svc_freereq(rqstp);
}

static void
svc_checkidle(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &pool->sp_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&pool->sp_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}
	mtx_lock(&pool->sp_lock);
}

static void
svc_assign_waiting_sockets(SVCPOOL *pool)
{
	SVCXPRT *xprt;

	mtx_lock(&pool->sp_lock);
	while ((xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
		if (xprt_assignthread(xprt))
			TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		else
			break;
	}
	mtx_unlock(&pool->sp_lock);
}

static void
svc_change_space_used(SVCPOOL *pool, int delta)
{
	unsigned int value;

	value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta;
	if (delta > 0) {
		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
		}
		if (value > pool->sp_space_used_highest)
			pool->sp_space_used_highest = value;
	} else {
		if (value < pool->sp_space_low && pool->sp_space_throttled) {
			pool->sp_space_throttled = FALSE;
			svc_assign_waiting_sockets(pool);
		}
	}
}

static bool_t
svc_request_space_available(SVCPOOL *pool)
{

	if (pool->sp_space_throttled)
		return (FALSE);
	return (TRUE);
}
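
/*
 * Worked example of the accounting above (numbers are illustrative):
 * with sp_space_high = 32MB and sp_space_low ~= 21MB, queuing parsed
 * requests grows sp_space_used until it crosses 32MB; at that point
 * sp_space_throttled is set and svc_request_space_available() returns
 * FALSE, so service threads stop draining their sockets.  As queued
 * requests are executed, svc_change_space_used() is called with a
 * negative delta, and once usage falls below 21MB the throttle is
 * lifted and waiting transports are assigned to threads again.
 */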

static void
svc_run_internal(SVCPOOL *pool, bool_t ismaster)
{
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	size_t sz;
	int error;

	st = mem_alloc(sizeof(*st));
	mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
	st->st_pool = pool;
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");

	mtx_lock(&pool->sp_lock);
	LIST_INSERT_HEAD(&pool->sp_threads, st, st_link);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (pool->sp_state == SVCPOOL_THREADSTARTING)
		pool->sp_state = SVCPOOL_ACTIVE;

	while (pool->sp_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (pool->sp_state == SVCPOOL_THREADWANTED) {
			pool->sp_state = SVCPOOL_THREADSTARTING;
			pool->sp_lastcreatetime = time_uptime;
			mtx_unlock(&pool->sp_lock);
			svc_new_thread(pool);
			mtx_lock(&pool->sp_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > pool->sp_lastidlecheck) {
			pool->sp_lastidlecheck = time_uptime;
			svc_checkidle(pool);
		}

		xprt = st->st_xprt;
		if (!xprt) {
			/*
			 * Enforce maxthreads count.
			 */
			if (pool->sp_threadcount > pool->sp_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
				TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			LIST_INSERT_HEAD(&pool->sp_idlethreads, st, st_ilink);
			if (ismaster || (!ismaster &&
			    pool->sp_threadcount > pool->sp_minthreads))
				error = cv_timedwait_sig(&st->st_cond,
				    &pool->sp_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &pool->sp_lock);
			if (st->st_xprt == NULL)
				LIST_REMOVE(st, st_ilink);

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (pool->sp_threadcount
					> pool->sp_minthreads)
					&& !st->st_xprt)
					break;
			} else if (error) {
				mtx_unlock(&pool->sp_lock);
				svc_exit(pool);
				mtx_lock(&pool->sp_lock);
				break;
			}
			continue;
		}
		mtx_unlock(&pool->sp_lock);

		/*
		 * Drain the transport socket and queue up any RPCs.
		 */
		xprt->xp_lastactive = time_uptime;
		do {
			if (!svc_request_space_available(pool))
				break;
			rqstp = NULL;
			stat = svc_getreq(xprt, &rqstp);
			if (rqstp) {
				svc_change_space_used(pool, rqstp->rq_size);
				/*
				 * See if the application has a preference
				 * for some other thread.
				 */
				if (pool->sp_assign) {
					stpref = pool->sp_assign(st, rqstp);
					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);
					mtx_unlock(&stpref->st_lock);
					if (stpref != st)
						rqstp = NULL;
				} else {
					rqstp->rq_thread = st;
					STAILQ_INSERT_TAIL(&st->st_reqs,
					    rqstp, rq_link);
				}
			}
		} while (rqstp == NULL && stat == XPRT_MOREREQS
		    && pool->sp_state != SVCPOOL_CLOSING);

		/*
		 * Move this transport to the end of the active list to
		 * ensure fairness when multiple transports are active.
		 * If this was the last queued request, svc_getreq will end
		 * up calling xprt_inactive to remove from the active list.
		 */
		mtx_lock(&pool->sp_lock);
		xprt->xp_thread = NULL;
		st->st_xprt = NULL;
		if (xprt->xp_active) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&pool->sp_active,
				    xprt, xp_alink);
		}
		mtx_unlock(&pool->sp_lock);
		SVC_RELEASE(xprt);

		/*
		 * Execute what we have queued.
		 */
		sz = 0;
		mtx_lock(&st->st_lock);
		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
			mtx_unlock(&st->st_lock);
			sz += rqstp->rq_size;
			svc_executereq(rqstp);
			mtx_lock(&st->st_lock);
		}
		mtx_unlock(&st->st_lock);
		svc_change_space_used(pool, -sz);
		mtx_lock(&pool->sp_lock);
	}

	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}

	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	LIST_REMOVE(st, st_link);
	pool->sp_threadcount--;

	mtx_unlock(&pool->sp_lock);

	mtx_destroy(&st->st_lock);
	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	if (!ismaster)
		wakeup(pool);
}

static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCPOOL *) arg, FALSE);
	kthread_exit();
}

static void
svc_new_thread(SVCPOOL *pool)
{
	struct thread *td;

	pool->sp_threadcount++;
	kthread_add(svc_thread_start, pool,
	    pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}

void
svc_run(SVCPOOL *pool)
{
	int i;
	struct proc *p;
	struct thread *td;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;
	pool->sp_lastcreatetime = time_uptime;
	pool->sp_threadcount = 1;

	for (i = 1; i < pool->sp_minthreads; i++) {
		svc_new_thread(pool);
	}

	svc_run_internal(pool, TRUE);

	mtx_lock(&pool->sp_lock);
	while (pool->sp_threadcount > 0)
		msleep(pool, &pool->sp_lock, 0, "svcexit", 0);
	mtx_unlock(&pool->sp_lock);
}
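
/*
 * Usage sketch (hedged; the tuning values are illustrative): the
 * caller's thread becomes the pool master, and svc_run() returns only
 * after svc_exit() has been called and every worker thread has exited:
 *
 *	pool->sp_minthreads = 4;
 *	pool->sp_maxthreads = 64;
 *	svc_run(pool);
 *	svcpool_destroy(pool);
 */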

void
svc_exit(SVCPOOL *pool)
{
	SVCTHREAD *st;

	mtx_lock(&pool->sp_lock);

	if (pool->sp_state != SVCPOOL_CLOSING) {
		pool->sp_state = SVCPOOL_CLOSING;
		LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
			cv_signal(&st->st_cond);
	}

	mtx_unlock(&pool->sp_lock);
}

bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	struct mbuf *m;
	XDR xdrs;
	bool_t stat;

	m = rqstp->rq_args;
	rqstp->rq_args = NULL;

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	stat = xargs(&xdrs, args);
	XDR_DESTROY(&xdrs);

	return (stat);
}

bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}

void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	if (st) {
		pool = st->st_pool;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}

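/*
 * A minimal dispatch routine, sketched (the myargs/myres types and
 * their xdr_* routines are hypothetical).  Ownership of rqstp passes
 * to the dispatcher, which must finish with svc_freereq():
 *
 *	static void
 *	mysvc_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
 *	{
 *		struct myargs args;
 *		struct myres res;
 *
 *		memset(&args, 0, sizeof(args));
 *		if (!svc_getargs(rqstp, (xdrproc_t) xdr_myargs, &args)) {
 *			svcerr_decode(rqstp);
 *			svc_freereq(rqstp);
 *			return;
 *		}
 *		... service the call, filling in res ...
 *		(void) svc_sendreply(rqstp, (xdrproc_t) xdr_myres, &res);
 *		(void) svc_freeargs(rqstp, (xdrproc_t) xdr_myargs, &args);
 *		svc_freereq(rqstp);
 *	}
 */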