1177633Sdfr/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/
2177633Sdfr
3261046Smav/*-
4261046Smav * Copyright (c) 2009, Sun Microsystems, Inc.
5261046Smav * All rights reserved.
6177633Sdfr *
7261046Smav * Redistribution and use in source and binary forms, with or without
8261046Smav * modification, are permitted provided that the following conditions are met:
9261046Smav * - Redistributions of source code must retain the above copyright notice,
10261046Smav *   this list of conditions and the following disclaimer.
11261046Smav * - Redistributions in binary form must reproduce the above copyright notice,
12261046Smav *   this list of conditions and the following disclaimer in the documentation
13261046Smav *   and/or other materials provided with the distribution.
14261046Smav * - Neither the name of Sun Microsystems, Inc. nor the names of its
15261046Smav *   contributors may be used to endorse or promote products derived
16261046Smav *   from this software without specific prior written permission.
17261046Smav *
18261046Smav * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19261046Smav * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20261046Smav * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21261046Smav * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22261046Smav * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23261046Smav * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24261046Smav * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25261046Smav * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26261046Smav * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27261046Smav * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28261046Smav * POSSIBILITY OF SUCH DAMAGE.
29177633Sdfr */
30177633Sdfr
31177633Sdfr#if defined(LIBC_SCCS) && !defined(lint)
32177633Sdfrstatic char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
33177633Sdfrstatic char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
34177633Sdfr#endif
35177633Sdfr#include <sys/cdefs.h>
36177633Sdfr__FBSDID("$FreeBSD$");
37177633Sdfr
38177633Sdfr/*
39177633Sdfr * svc.c, Server-side remote procedure call interface.
40177633Sdfr *
41177633Sdfr * There are two sets of procedures here.  The xprt routines are
42177633Sdfr * for handling transport handles.  The svc routines handle the
43177633Sdfr * list of service routines.
44177633Sdfr *
45177633Sdfr * Copyright (C) 1984, Sun Microsystems, Inc.
46177633Sdfr */
47177633Sdfr
48177633Sdfr#include <sys/param.h>
49177633Sdfr#include <sys/lock.h>
50177633Sdfr#include <sys/kernel.h>
51184588Sdfr#include <sys/kthread.h>
52177633Sdfr#include <sys/malloc.h>
53184588Sdfr#include <sys/mbuf.h>
54177633Sdfr#include <sys/mutex.h>
55184588Sdfr#include <sys/proc.h>
56177633Sdfr#include <sys/queue.h>
57184588Sdfr#include <sys/socketvar.h>
58177633Sdfr#include <sys/systm.h>
59267742Smav#include <sys/smp.h>
60261055Smav#include <sys/sx.h>
61177633Sdfr#include <sys/ucred.h>
62177633Sdfr
63177633Sdfr#include <rpc/rpc.h>
64177633Sdfr#include <rpc/rpcb_clnt.h>
65184588Sdfr#include <rpc/replay.h>
66177633Sdfr
67177685Sdfr#include <rpc/rpc_com.h>
68177633Sdfr
69177633Sdfr#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
70184588Sdfr#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)
71177633Sdfr
72177633Sdfrstatic struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
73177633Sdfr    char *);
74267742Smavstatic void svc_new_thread(SVCGROUP *grp);
75184588Sdfrstatic void xprt_unregister_locked(SVCXPRT *xprt);
76261054Smavstatic void svc_change_space_used(SVCPOOL *pool, int delta);
77261054Smavstatic bool_t svc_request_space_available(SVCPOOL *pool);
78177633Sdfr
79177633Sdfr/* ***************  SVCXPRT related stuff **************** */
80177633Sdfr
81184588Sdfrstatic int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
82184588Sdfrstatic int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
83267742Smavstatic int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);
84184588Sdfr
/*
 * Create a new service pool.  "name" labels the pool (used for its
 * sysctl context and visible state) and "sysctl_base", which may be
 * NULL, is the sysctl node under which the pool's tuning knobs and
 * statistics are attached.  Returns the new pool; allocation sleeps
 * (M_WAITOK) and cannot fail.
 */
SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;
	SVCGROUP *grp;
	int g;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_groupcount = 1;
	/*
	 * Initialize every group up front (not just sp_groupcount of
	 * them) so their locks and queues exist regardless of how many
	 * groups are in use; svcpool_destroy() tears all of them down.
	 */
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
		grp->sg_pool = pool;
		grp->sg_state = SVCPOOL_ACTIVE;
		TAILQ_INIT(&grp->sg_xlist);
		TAILQ_INIT(&grp->sg_active);
		LIST_INIT(&grp->sg_idlethreads);
		grp->sg_minthreads = 1;
		grp->sg_maxthreads = 1;
	}

	/*
	 * Don't use more than a quarter of mbuf clusters or more than
	 * 45Mb buffering requests.
	 */
	pool->sp_space_high = nmbclusters * MCLBYTES / 4;
	if (pool->sp_space_high > 45 << 20)
		pool->sp_space_high = 45 << 20;
	/* Resume taking requests once usage falls to 2/3 of the limit. */
	pool->sp_space_low = 2 * pool->sp_space_high / 3;

	sysctl_ctx_init(&pool->sp_sysctl);
	if (sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_minthread_sysctl, "I",
		    "Minimal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_maxthread_sysctl, "I",
		    "Maximal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLTYPE_INT | CTLFLAG_RD,
		    pool, 0, svcpool_threads_sysctl, "I",
		    "Current number of threads");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
		    "Number of thread groups");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used, 0,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest, 0,
		    "Highest space used since reboot.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high, 0,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low, 0,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}
175177633Sdfr
/*
 * Tear down a service pool: unregister every transport and every
 * service/connection-loss callout, free the replay cache and sysctl
 * context, and free the pool itself.
 */
void
svcpool_destroy(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;
	int g;

	TAILQ_INIT(&cleanup);

	/*
	 * Unhook all transports from their groups while holding each
	 * group lock, collecting them on a local list; the references
	 * are dropped afterwards, outside any lock.
	 */
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
		mtx_unlock(&grp->sg_lock);
	}
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}

	/*
	 * Remove all callouts.  The pool lock is dropped around each
	 * svc_unreg()/svc_loss_unreg() call because those routines
	 * take it themselves.
	 */
	mtx_lock(&pool->sp_lock);
	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);

	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_destroy(&grp->sg_lock);
	}
	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}
226177633Sdfr
227267742Smav/*
228267742Smav * Sysctl handler to get the present thread count on a pool
229267742Smav */
230267742Smavstatic int
231267742Smavsvcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
232184588Sdfr{
233267742Smav	SVCPOOL *pool;
234267742Smav	int threads, error, g;
235184588Sdfr
236267742Smav	pool = oidp->oid_arg1;
237267742Smav	threads = 0;
238267742Smav	mtx_lock(&pool->sp_lock);
239267742Smav	for (g = 0; g < pool->sp_groupcount; g++)
240267742Smav		threads += pool->sp_groups[g].sg_threadcount;
241267742Smav	mtx_unlock(&pool->sp_lock);
242267742Smav	error = sysctl_handle_int(oidp, &threads, 0, req);
243267742Smav	return (error);
244184588Sdfr}
245184588Sdfr
246177633Sdfr/*
247184588Sdfr * Sysctl handler to set the minimum thread count on a pool
248184588Sdfr */
249184588Sdfrstatic int
250184588Sdfrsvcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
251184588Sdfr{
252184588Sdfr	SVCPOOL *pool;
253267742Smav	int newminthreads, error, g;
254184588Sdfr
255184588Sdfr	pool = oidp->oid_arg1;
256184588Sdfr	newminthreads = pool->sp_minthreads;
257184588Sdfr	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
258184588Sdfr	if (error == 0 && newminthreads != pool->sp_minthreads) {
259184588Sdfr		if (newminthreads > pool->sp_maxthreads)
260184588Sdfr			return (EINVAL);
261184588Sdfr		mtx_lock(&pool->sp_lock);
262267742Smav		pool->sp_minthreads = newminthreads;
263267742Smav		for (g = 0; g < pool->sp_groupcount; g++) {
264267742Smav			pool->sp_groups[g].sg_minthreads = max(1,
265267742Smav			    pool->sp_minthreads / pool->sp_groupcount);
266184588Sdfr		}
267184588Sdfr		mtx_unlock(&pool->sp_lock);
268184588Sdfr	}
269184588Sdfr	return (error);
270184588Sdfr}
271184588Sdfr
272184588Sdfr/*
273184588Sdfr * Sysctl handler to set the maximum thread count on a pool
274184588Sdfr */
275184588Sdfrstatic int
276184588Sdfrsvcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
277184588Sdfr{
278184588Sdfr	SVCPOOL *pool;
279267742Smav	int newmaxthreads, error, g;
280184588Sdfr
281184588Sdfr	pool = oidp->oid_arg1;
282184588Sdfr	newmaxthreads = pool->sp_maxthreads;
283184588Sdfr	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
284184588Sdfr	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
285184588Sdfr		if (newmaxthreads < pool->sp_minthreads)
286184588Sdfr			return (EINVAL);
287184588Sdfr		mtx_lock(&pool->sp_lock);
288267742Smav		pool->sp_maxthreads = newmaxthreads;
289267742Smav		for (g = 0; g < pool->sp_groupcount; g++) {
290267742Smav			pool->sp_groups[g].sg_maxthreads = max(1,
291267742Smav			    pool->sp_maxthreads / pool->sp_groupcount);
292184588Sdfr		}
293184588Sdfr		mtx_unlock(&pool->sp_lock);
294184588Sdfr	}
295184588Sdfr	return (error);
296184588Sdfr}
297184588Sdfr
/*
 * Activate a transport handle.  Takes a reference on the transport,
 * assigns it to a thread group and links it on that group's list of
 * registered transports.  The reference is dropped by
 * xprt_unregister().
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCGROUP *grp;
	int g;

	SVC_ACQUIRE(xprt);
	/* Spread transports over the groups round-robin. */
	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
	xprt->xp_group = grp = &pool->sp_groups[g];
	mtx_lock(&grp->sg_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
	mtx_unlock(&grp->sg_lock);
}
317177633Sdfr
/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.  Caller must hold the transport's group lock and the
 * transport must currently be registered.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	/* Pull it off the active queue first so no thread picks it up. */
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}
335177633Sdfr
/*
 * De-activate a transport handle and drop the reference taken by
 * xprt_register().  Safe against concurrent callers: only the first
 * one to see the transport registered performs the work.
 */
void
xprt_unregister(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&grp->sg_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&grp->sg_lock);

	/* The reference is released only after the group lock is dropped. */
	SVC_RELEASE(xprt);
}
352177633Sdfr
/*
 * Attempt to assign a service thread to this transport.  Returns TRUE
 * if an idle thread was found and woken to handle the transport, FALSE
 * otherwise (in which case the caller is expected to queue the
 * transport on the group's active list).  Caller must hold the group
 * lock.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;
	SVCTHREAD *st;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	st = LIST_FIRST(&grp->sg_idlethreads);
	if (st) {
		/* Hand the transport to the idle thread and wake it. */
		LIST_REMOVE(st, st_ilink);
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (grp->sg_state == SVCPOOL_ACTIVE
		    && grp->sg_lastcreatetime < time_uptime
		    && grp->sg_threadcount < grp->sg_maxthreads) {
			grp->sg_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}
388184588Sdfr
/*
 * Mark a transport as having work pending.  Either hands it directly
 * to an idle service thread or queues it on the group's active list
 * for the next available thread.
 */
void
xprt_active(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&grp->sg_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			/*
			 * Queue the transport only if we are throttled
			 * on request space or no thread could take it
			 * immediately; otherwise the assigned thread
			 * owns it and it stays off the active list.
			 */
			if (!svc_request_space_available(xprt->xp_pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&grp->sg_lock);
}
416177633Sdfr
/*
 * Clear a transport's active flag and, if no thread currently owns it
 * (in which case it sits on the group's active list), unlink it from
 * that list.  Caller must hold the group lock.
 */
void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}
429177633Sdfr
/*
 * Locking wrapper around xprt_inactive_locked() for callers that do
 * not already hold the group lock.
 */
void
xprt_inactive(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&grp->sg_lock);
}
439177633Sdfr
/*
 * Variant of xprt_inactive() for use only when sure that the port is
 * assigned to a thread.  For example, within receive handlers.  Since
 * a thread-owned transport is never on the group's active list, the
 * flag can be cleared without taking the group lock.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}
452261053Smav
/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when a rpc request for this
 * program number comes in.
 *
 * "netid" is taken from the transport if set, otherwise from nconf;
 * registration also advertises the service to the local rpcbind via
 * rpcb_set() when a netconfig is supplied.  Returns TRUE on success.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		/* Entry already exists: only the same dispatcher may reuse it. */
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* he is registering another xptr */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	/* M_NOWAIT: we cannot sleep while holding sp_lock. */
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}
522177633Sdfr
523177633Sdfr/*
524177633Sdfr * Remove a service program from the callout list.
525177633Sdfr */
526177633Sdfrvoid
527177633Sdfrsvc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
528177633Sdfr{
529177633Sdfr	struct svc_callout *s;
530177633Sdfr
531177633Sdfr	/* unregister the information anyway */
532177633Sdfr	(void) rpcb_unset(prog, vers, NULL);
533177633Sdfr	mtx_lock(&pool->sp_lock);
534177633Sdfr	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
535177633Sdfr		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
536177633Sdfr		if (s->sc_netid)
537177633Sdfr			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
538177633Sdfr		mem_free(s, sizeof (struct svc_callout));
539177633Sdfr	}
540177633Sdfr	mtx_unlock(&pool->sp_lock);
541177633Sdfr}
542177633Sdfr
543261055Smav/*
544261055Smav * Add a service connection loss program to the callout list.
545261055Smav * The dispatch routine will be called when some port in ths pool die.
546261055Smav */
547261055Smavbool_t
548261055Smavsvc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
549261055Smav{
550261055Smav	SVCPOOL *pool = xprt->xp_pool;
551261055Smav	struct svc_loss_callout *s;
552261055Smav
553261055Smav	mtx_lock(&pool->sp_lock);
554261055Smav	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
555261055Smav		if (s->slc_dispatch == dispatch)
556261055Smav			break;
557261055Smav	}
558261055Smav	if (s != NULL) {
559261055Smav		mtx_unlock(&pool->sp_lock);
560261055Smav		return (TRUE);
561261055Smav	}
562261055Smav	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
563261055Smav	if (s == NULL) {
564261055Smav		mtx_unlock(&pool->sp_lock);
565261055Smav		return (FALSE);
566261055Smav	}
567261055Smav	s->slc_dispatch = dispatch;
568261055Smav	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
569261055Smav	mtx_unlock(&pool->sp_lock);
570261055Smav	return (TRUE);
571261055Smav}
572261055Smav
573261055Smav/*
574261055Smav * Remove a service connection loss program from the callout list.
575261055Smav */
576261055Smavvoid
577261055Smavsvc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
578261055Smav{
579261055Smav	struct svc_loss_callout *s;
580261055Smav
581261055Smav	mtx_lock(&pool->sp_lock);
582261055Smav	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
583261055Smav		if (s->slc_dispatch == dispatch) {
584261055Smav			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
585261055Smav			free(s, M_RPC);
586261055Smav			break;
587261055Smav		}
588261055Smav	}
589261055Smav	mtx_unlock(&pool->sp_lock);
590261055Smav}
591261055Smav
592177633Sdfr/* ********************** CALLOUT list related stuff ************* */
593177633Sdfr
594177633Sdfr/*
595177633Sdfr * Search the callout list for a program number, return the callout
596177633Sdfr * struct.
597177633Sdfr */
598177633Sdfrstatic struct svc_callout *
599177633Sdfrsvc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
600177633Sdfr{
601177633Sdfr	struct svc_callout *s;
602177633Sdfr
603177633Sdfr	mtx_assert(&pool->sp_lock, MA_OWNED);
604177633Sdfr	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
605177633Sdfr		if (s->sc_prog == prog && s->sc_vers == vers
606177633Sdfr		    && (netid == NULL || s->sc_netid == NULL ||
607177633Sdfr			strcmp(netid, s->sc_netid) == 0))
608177633Sdfr			break;
609177633Sdfr	}
610177633Sdfr
611177633Sdfr	return (s);
612177633Sdfr}
613177633Sdfr
614177633Sdfr/* ******************* REPLY GENERATION ROUTINES  ************ */
615177633Sdfr
/*
 * Common tail of reply generation: free the (now unneeded) request
 * arguments, record the reply in the replay cache if one is
 * configured, wrap the body through the authentication layer and hand
 * the message to the transport.  Consumes and clears rqstp->rq_addr.
 * Returns TRUE if the reply was sent successfully.
 */
static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	/* The auth layer may replace the body (e.g. to add integrity data). */
	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}
643184588Sdfr
/*
 * Send a reply to an rpc request
 *
 * Encodes the results via xdr_results/xdr_location into a fresh mbuf
 * and sends a MSG_ACCEPTED/SUCCESS reply.  Returns FALSE if encoding
 * or the transport send fails.
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	/* Results travel as a separate mbuf body, not via the xdrproc. */
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}
675177633Sdfr
676184588Sdfrbool_t
677184588Sdfrsvc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
678184588Sdfr{
679184588Sdfr	struct rpc_msg rply;
680184588Sdfr
681184588Sdfr	rply.rm_xid = rqstp->rq_xid;
682184588Sdfr	rply.rm_direction = REPLY;
683184588Sdfr	rply.rm_reply.rp_stat = MSG_ACCEPTED;
684184588Sdfr	rply.acpted_rply.ar_verf = rqstp->rq_verf;
685184588Sdfr	rply.acpted_rply.ar_stat = SUCCESS;
686184588Sdfr	rply.acpted_rply.ar_results.where = NULL;
687184588Sdfr	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
688184588Sdfr
689184588Sdfr	return (svc_sendreply_common(rqstp, &rply, m));
690184588Sdfr}
691184588Sdfr
/*
 * No procedure error reply
 *
 * Sends a MSG_ACCEPTED / PROC_UNAVAIL reply for a request whose
 * procedure number is not implemented by this service.
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	/* Record the error reply in the replay cache, if configured. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
713177633Sdfr
/*
 * Can't decode args error reply
 *
 * Sends a MSG_ACCEPTED / GARBAGE_ARGS reply when the request
 * arguments could not be XDR-decoded.
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	/*
	 * NOTE(review): unlike the other svcerr_* routines this passes
	 * xp_rtaddr directly instead of svc_getrpccaller(rqstp) -
	 * confirm the difference is intentional.
	 */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
735177633Sdfr
/*
 * Some system error
 *
 * Sends a MSG_ACCEPTED / SYSTEM_ERR reply for a failure internal to
 * the server (e.g. resource exhaustion).
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	/* Record the error reply in the replay cache, if configured. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
757177633Sdfr
/*
 * Authentication error reply
 *
 * Sends a MSG_DENIED / AUTH_ERROR reply with the specific rejection
 * reason "why".
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	/* Record the error reply in the replay cache, if configured. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
779177633Sdfr
/*
 * Auth too weak error reply
 *
 * Convenience wrapper: reject the request with AUTH_TOOWEAK.
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}
789177633Sdfr
/*
 * Program unavailable error reply
 *
 * Sends a MSG_ACCEPTED / PROG_UNAVAIL reply when the requested
 * program number is not served here.
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	/* Record the error reply in the replay cache, if configured. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
811177633Sdfr
/*
 * Program version mismatch error reply
 *
 * Sends a MSG_ACCEPTED / PROG_MISMATCH reply advertising the
 * [low_vers, high_vers] range of versions this server supports.
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	/* Record the error reply in the replay cache, if configured. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
835177633Sdfr
836184588Sdfr/*
837184588Sdfr * Allocate a new server transport structure. All fields are
838184588Sdfr * initialized to zero and xp_p3 is initialized to point at an
839184588Sdfr * extension structure to hold various flags and authentication
840184588Sdfr * parameters.
841184588Sdfr */
842184588SdfrSVCXPRT *
843184588Sdfrsvc_xprt_alloc()
844184588Sdfr{
845184588Sdfr	SVCXPRT *xprt;
846184588Sdfr	SVCXPRT_EXT *ext;
847184588Sdfr
848184588Sdfr	xprt = mem_alloc(sizeof(SVCXPRT));
849184588Sdfr	memset(xprt, 0, sizeof(SVCXPRT));
850184588Sdfr	ext = mem_alloc(sizeof(SVCXPRT_EXT));
851184588Sdfr	memset(ext, 0, sizeof(SVCXPRT_EXT));
852184588Sdfr	xprt->xp_p3 = ext;
853184588Sdfr	refcount_init(&xprt->xp_refs, 1);
854184588Sdfr
855184588Sdfr	return (xprt);
856184588Sdfr}
857184588Sdfr
858184588Sdfr/*
859184588Sdfr * Free a server transport structure.
860184588Sdfr */
861184588Sdfrvoid
862184588Sdfrsvc_xprt_free(xprt)
863184588Sdfr	SVCXPRT *xprt;
864184588Sdfr{
865184588Sdfr
866184588Sdfr	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
867184588Sdfr	mem_free(xprt, sizeof(SVCXPRT));
868184588Sdfr}
869184588Sdfr
/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 *
 * On success a fully-authenticated request is returned via
 * *rqstp_ret; on any other path the request is consumed here.
 * The return value is the transport status after the receive; if
 * the transport died, loss callouts are run and it is unregistered.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprtprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	/*
	 * The credential, verifier and decoded client credential each
	 * get MAX_AUTH_BYTES of the request's scratch area.
	 */
	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				/*
				 * Retransmission of a completed call:
				 * resend the cached reply and drop the
				 * duplicate request.
				 */
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				/* e.g. still in progress: drop silently. */
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		/* Size accounting: request header plus argument mbuf chain. */
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	/* Any request not handed to the caller is freed here. */
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		/* Notify loss callouts, then detach the dead transport. */
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}
970184588Sdfr
971184588Sdfrstatic void
972184588Sdfrsvc_executereq(struct svc_req *rqstp)
973184588Sdfr{
974184588Sdfr	SVCXPRT *xprt = rqstp->rq_xprt;
975184588Sdfr	SVCPOOL *pool = xprt->xp_pool;
976184588Sdfr	int prog_found;
977184588Sdfr	rpcvers_t low_vers;
978184588Sdfr	rpcvers_t high_vers;
979184588Sdfr	struct svc_callout *s;
980184588Sdfr
981184588Sdfr	/* now match message with a registered service*/
982184588Sdfr	prog_found = FALSE;
983184588Sdfr	low_vers = (rpcvers_t) -1L;
984184588Sdfr	high_vers = (rpcvers_t) 0L;
985184588Sdfr	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
986184588Sdfr		if (s->sc_prog == rqstp->rq_prog) {
987184588Sdfr			if (s->sc_vers == rqstp->rq_vers) {
988184588Sdfr				/*
989184588Sdfr				 * We hand ownership of r to the
990184588Sdfr				 * dispatch method - they must call
991184588Sdfr				 * svc_freereq.
992184588Sdfr				 */
993184588Sdfr				(*s->sc_dispatch)(rqstp, xprt);
994184588Sdfr				return;
995184588Sdfr			}  /* found correct version */
996184588Sdfr			prog_found = TRUE;
997184588Sdfr			if (s->sc_vers < low_vers)
998184588Sdfr				low_vers = s->sc_vers;
999184588Sdfr			if (s->sc_vers > high_vers)
1000184588Sdfr				high_vers = s->sc_vers;
1001184588Sdfr		}   /* found correct program */
1002184588Sdfr	}
1003184588Sdfr
1004184588Sdfr	/*
1005184588Sdfr	 * if we got here, the program or version
1006184588Sdfr	 * is not served ...
1007184588Sdfr	 */
1008184588Sdfr	if (prog_found)
1009184588Sdfr		svcerr_progvers(rqstp, low_vers, high_vers);
1010184588Sdfr	else
1011184588Sdfr		svcerr_noprog(rqstp);
1012184588Sdfr
1013184588Sdfr	svc_freereq(rqstp);
1014184588Sdfr}
1015184588Sdfr
/*
 * Scan the group's transport list and unregister any transport that
 * has exceeded its idle timeout.  Called with sg_lock held; the lock
 * is dropped across the SVC_RELEASE calls and reacquired before
 * returning.
 */
static void
svc_checkidle(SVCGROUP *grp)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			/* Collect victims; release them outside the lock. */
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&grp->sg_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}
	mtx_lock(&grp->sg_lock);
}
1045177633Sdfr
1046184588Sdfrstatic void
1047184588Sdfrsvc_assign_waiting_sockets(SVCPOOL *pool)
1048177633Sdfr{
1049267742Smav	SVCGROUP *grp;
1050177633Sdfr	SVCXPRT *xprt;
1051267742Smav	int g;
1052184588Sdfr
1053267742Smav	for (g = 0; g < pool->sp_groupcount; g++) {
1054267742Smav		grp = &pool->sp_groups[g];
1055267742Smav		mtx_lock(&grp->sg_lock);
1056267742Smav		while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1057267742Smav			if (xprt_assignthread(xprt))
1058267742Smav				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1059267742Smav			else
1060267742Smav				break;
1061267742Smav		}
1062267742Smav		mtx_unlock(&grp->sg_lock);
1063184588Sdfr	}
1064184588Sdfr}
1065184588Sdfr
/*
 * Adjust the pool's outstanding-request space accounting by delta
 * bytes.  Throttling starts when usage reaches sp_space_high and
 * stops when it falls below sp_space_low, at which point any
 * transports stalled by throttling are handed back to idle threads.
 * The flag and statistics updates are done without a lock;
 * presumably occasional races here are tolerable - confirm against
 * the pool locking design.
 */
static void
svc_change_space_used(SVCPOOL *pool, int delta)
{
	unsigned int value;

	/* atomic_fetchadd returns the old value; recover the new one. */
	value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta;
	if (delta > 0) {
		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
		}
		/* Track the high-water mark. */
		if (value > pool->sp_space_used_highest)
			pool->sp_space_used_highest = value;
	} else {
		if (value < pool->sp_space_low && pool->sp_space_throttled) {
			pool->sp_space_throttled = FALSE;
			/* Resume transports that were waiting for space. */
			svc_assign_waiting_sockets(pool);
		}
	}
}
1086184588Sdfr
1087261054Smavstatic bool_t
1088261054Smavsvc_request_space_available(SVCPOOL *pool)
1089261054Smav{
1090261054Smav
1091261054Smav	if (pool->sp_space_throttled)
1092261054Smav		return (FALSE);
1093261054Smav	return (TRUE);
1094261054Smav}
1095261054Smav
/*
 * Main loop for a service thread (both the master thread and the
 * dynamically created workers).  Repeatedly: pick up an active
 * transport, drain RPC requests from it, then execute whatever was
 * queued to this thread.  Also handles on-demand thread creation,
 * idle-transport checking, and worker shrinkage when idle.  Runs
 * with sg_lock held except where noted; exits when the group enters
 * SVCPOOL_CLOSING or this worker times out idle above sg_minthreads.
 */
static void
svc_run_internal(SVCGROUP *grp, bool_t ismaster)
{
	SVCPOOL *pool = grp->sg_pool;
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	size_t sz;
	int error;

	/* Per-thread state lives on the heap for the thread's lifetime. */
	st = mem_alloc(sizeof(*st));
	mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
	st->st_pool = pool;
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");

	mtx_lock(&grp->sg_lock);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (grp->sg_state == SVCPOOL_THREADSTARTING)
		grp->sg_state = SVCPOOL_ACTIVE;

	while (grp->sg_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (grp->sg_state == SVCPOOL_THREADWANTED) {
			grp->sg_state = SVCPOOL_THREADSTARTING;
			grp->sg_lastcreatetime = time_uptime;
			mtx_unlock(&grp->sg_lock);
			svc_new_thread(grp);
			mtx_lock(&grp->sg_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > grp->sg_lastidlecheck) {
			grp->sg_lastidlecheck = time_uptime;
			svc_checkidle(grp);
		}

		xprt = st->st_xprt;
		if (!xprt) {
			/*
			 * Enforce maxthreads count.
			 */
			if (grp->sg_threadcount > grp->sg_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			/*
			 * Nothing to do: go idle.  Workers above the
			 * minimum (and the master) use a 5 second
			 * timeout so surplus threads can retire.
			 */
			LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
			if (ismaster || (!ismaster &&
			    grp->sg_threadcount > grp->sg_minthreads))
				error = cv_timedwait_sig(&st->st_cond,
				    &grp->sg_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &grp->sg_lock);
			/*
			 * If we were handed a transport while asleep,
			 * whoever woke us already removed us from the
			 * idle list.
			 */
			if (st->st_xprt == NULL)
				LIST_REMOVE(st, st_ilink);

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (grp->sg_threadcount
					> grp->sg_minthreads)
					&& !st->st_xprt)
					break;
			} else if (error) {
				/* Interrupted by a signal: shut down. */
				mtx_unlock(&grp->sg_lock);
				svc_exit(pool);
				mtx_lock(&grp->sg_lock);
				break;
			}
			continue;
		}
		mtx_unlock(&grp->sg_lock);

		/*
		 * Drain the transport socket and queue up any RPCs.
		 */
		xprt->xp_lastactive = time_uptime;
		do {
			if (!svc_request_space_available(pool))
				break;
			rqstp = NULL;
			stat = svc_getreq(xprt, &rqstp);
			if (rqstp) {
				svc_change_space_used(pool, rqstp->rq_size);
				/*
				 * See if the application has a preference
				 * for some other thread.
				 */
				if (pool->sp_assign) {
					/*
					 * sp_assign returns with the
					 * chosen thread's st_lock held
					 * - we queue then unlock.
					 */
					stpref = pool->sp_assign(st, rqstp);
					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);
					mtx_unlock(&stpref->st_lock);
					if (stpref != st)
						rqstp = NULL;
				} else {
					rqstp->rq_thread = st;
					STAILQ_INSERT_TAIL(&st->st_reqs,
					    rqstp, rq_link);
				}
			}
		} while (rqstp == NULL && stat == XPRT_MOREREQS
		    && grp->sg_state != SVCPOOL_CLOSING);

		/*
		 * Move this transport to the end of the active list to
		 * ensure fairness when multiple transports are active.
		 * If this was the last queued request, svc_getreq will end
		 * up calling xprt_inactive to remove from the active list.
		 */
		mtx_lock(&grp->sg_lock);
		xprt->xp_thread = NULL;
		st->st_xprt = NULL;
		if (xprt->xp_active) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active,
				    xprt, xp_alink);
		}
		mtx_unlock(&grp->sg_lock);
		SVC_RELEASE(xprt);

		/*
		 * Execute what we have queued.
		 */
		sz = 0;
		mtx_lock(&st->st_lock);
		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
			/* Drop our lock while the service routine runs. */
			mtx_unlock(&st->st_lock);
			sz += rqstp->rq_size;
			svc_executereq(rqstp);
			mtx_lock(&st->st_lock);
		}
		mtx_unlock(&st->st_lock);
		/* Return the space consumed by the executed requests. */
		svc_change_space_used(pool, -sz);
		mtx_lock(&grp->sg_lock);
	}

	/* Shutting down: drop any transport still assigned to us. */
	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}
	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	mtx_destroy(&st->st_lock);
	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	grp->sg_threadcount--;
	/* Let svc_run's teardown loop observe the thread count change. */
	if (!ismaster)
		wakeup(grp);
	mtx_unlock(&grp->sg_lock);
}
1278177633Sdfr
/*
 * Kernel-thread entry point for workers created by svc_new_thread:
 * run the group's service loop as a non-master thread, then exit.
 */
static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCGROUP *) arg, FALSE);
	kthread_exit();
}
1286184588Sdfr
/*
 * Start one additional worker thread for the given thread group.
 * The group's thread count is bumped before the thread is created.
 * NOTE(review): the return value of kthread_add is not checked; a
 * failure would leave sg_threadcount overstated - confirm whether
 * that is possible/acceptable here.
 */
static void
svc_new_thread(SVCGROUP *grp)
{
	SVCPOOL *pool = grp->sg_pool;
	struct thread *td;

	grp->sg_threadcount++;
	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}
1297184588Sdfr
/*
 * Run the RPC service.  The calling thread becomes the master thread
 * of group 0; extra worker threads are created to bring each group
 * up to its minimum.  Does not return until the pool is shut down
 * (svc_exit) and every worker thread has exited.
 */
void
svc_run(SVCPOOL *pool)
{
	int g, i;
	struct proc *p;
	struct thread *td;
	SVCGROUP *grp;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;

	/* Choose group count based on number of threads and CPUs. */
	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		/* Split the pool's thread limits evenly across groups. */
		grp->sg_minthreads = max(1,
		    pool->sp_minthreads / pool->sp_groupcount);
		grp->sg_maxthreads = max(1,
		    pool->sp_maxthreads / pool->sp_groupcount);
		grp->sg_lastcreatetime = time_uptime;
	}

	/* Starting threads */
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		/* Group 0 counts the calling thread as one of its workers. */
		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
			svc_new_thread(grp);
	}
	pool->sp_groups[0].sg_threadcount++;
	svc_run_internal(&pool->sp_groups[0], TRUE);

	/* Waiting for threads to stop. */
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while (grp->sg_threadcount > 0)
			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
		mtx_unlock(&grp->sg_lock);
	}
}
1343184588Sdfr
/*
 * Request an orderly shutdown of the pool: mark the pool and every
 * thread group closing, and signal all idle worker threads so they
 * notice the state change and exit their service loops.
 */
void
svc_exit(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCTHREAD *st;
	int g;

	pool->sp_state = SVCPOOL_CLOSING;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		if (grp->sg_state != SVCPOOL_CLOSING) {
			grp->sg_state = SVCPOOL_CLOSING;
			/* Wake sleepers so they see SVCPOOL_CLOSING. */
			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		mtx_unlock(&grp->sg_lock);
	}
}
1363184588Sdfr
1364184588Sdfrbool_t
1365184588Sdfrsvc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1366184588Sdfr{
1367184588Sdfr	struct mbuf *m;
1368184588Sdfr	XDR xdrs;
1369184588Sdfr	bool_t stat;
1370184588Sdfr
1371184588Sdfr	m = rqstp->rq_args;
1372184588Sdfr	rqstp->rq_args = NULL;
1373184588Sdfr
1374184588Sdfr	xdrmbuf_create(&xdrs, m, XDR_DECODE);
1375184588Sdfr	stat = xargs(&xdrs, args);
1376184588Sdfr	XDR_DESTROY(&xdrs);
1377184588Sdfr
1378184588Sdfr	return (stat);
1379184588Sdfr}
1380184588Sdfr
/*
 * Free memory allocated by a previous svc_getargs decode, and drop
 * the request's cached caller address.  Only x_op is initialized in
 * the XDR before invoking xargs in XDR_FREE mode; presumably the
 * free path of xdrproc routines reads nothing else - confirm against
 * the XDR implementation.
 */
bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}
1394184588Sdfr
/*
 * Release all resources held by a request: run the pool's sp_done
 * callback (if the request was assigned to a thread and a callback
 * is set), release the auth handle, drop the transport reference,
 * free the caller address and any undecoded arguments, then free
 * the request structure itself.
 */
void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	if (st) {
		pool = st->st_pool;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}
1423