/* nfs_nfsiod.c revision 184561 */
119304Speter/*-
219304Speter * Copyright (c) 1989, 1993
319304Speter *	The Regents of the University of California.  All rights reserved.
419304Speter *
519304Speter * This code is derived from software contributed to Berkeley by
619304Speter * Rick Macklem at The University of Guelph.
719304Speter *
819304Speter * Redistribution and use in source and binary forms, with or without
919304Speter * modification, are permitted provided that the following conditions
1019304Speter * are met:
1119304Speter * 1. Redistributions of source code must retain the above copyright
1219304Speter *    notice, this list of conditions and the following disclaimer.
13254225Speter * 2. Redistributions in binary form must reproduce the above copyright
1419304Speter *    notice, this list of conditions and the following disclaimer in the
1519304Speter *    documentation and/or other materials provided with the distribution.
1619304Speter * 4. Neither the name of the University nor the names of its contributors
1719304Speter *    may be used to endorse or promote products derived from this software
1819304Speter *    without specific prior written permission.
1919304Speter *
2019304Speter * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2119304Speter * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2219304Speter * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2319304Speter * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
2419304Speter * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2519304Speter * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2619304Speter * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2719304Speter * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2819304Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2919304Speter * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3019304Speter * SUCH DAMAGE.
3119304Speter *
3219304Speter *	@(#)nfs_syscalls.c	8.5 (Berkeley) 3/30/95
3319304Speter */
3419304Speter
3519304Speter#include <sys/cdefs.h>
36254225Speter__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_nfsiod.c 184561 2008-11-02 17:00:23Z trhodes $");
3719304Speter
3819304Speter#include <sys/param.h>
3919304Speter#include <sys/systm.h>
4019304Speter#include <sys/sysproto.h>
4119304Speter#include <sys/kernel.h>
4219304Speter#include <sys/sysctl.h>
4319304Speter#include <sys/file.h>
4419304Speter#include <sys/filedesc.h>
4519304Speter#include <sys/vnode.h>
4619304Speter#include <sys/malloc.h>
4719304Speter#include <sys/mount.h>
48254225Speter#include <sys/proc.h>
4919304Speter#include <sys/bio.h>
5019304Speter#include <sys/buf.h>
5119304Speter#include <sys/mbuf.h>
5219304Speter#include <sys/socket.h>
5319304Speter#include <sys/socketvar.h>
5419304Speter#include <sys/domain.h>
5519304Speter#include <sys/protosw.h>
5619304Speter#include <sys/namei.h>
5719304Speter#include <sys/unistd.h>
5819304Speter#include <sys/kthread.h>
5919304Speter#include <sys/fcntl.h>
6019304Speter#include <sys/lockf.h>
6119304Speter#include <sys/mutex.h>
6219304Speter
6319304Speter#include <netinet/in.h>
6419304Speter#include <netinet/tcp.h>
6519304Speter
6619304Speter#include <rpc/rpcclnt.h>
6719304Speter
6819304Speter#include <nfs/xdr_subs.h>
6919304Speter#include <nfs/rpcv2.h>
7019304Speter#include <nfs/nfsproto.h>
7119304Speter#include <nfsclient/nfs.h>
72254225Speter#include <nfsclient/nfsm_subs.h>
7319304Speter#include <nfsclient/nfsmount.h>
7419304Speter#include <nfsclient/nfsnode.h>
7519304Speter#include <nfsclient/nfs_lock.h>
7619304Speter
static MALLOC_DEFINE(M_NFSSVC, "nfsclient_srvsock", "Nfs server structure");

/* Worker routine run by each nfsiod kthread; defined below. */
static void	nfssvc_iod(void *);

/*
 * Slot table for the async daemons: nfs_asyncdaemon[i] is nonzero while
 * iod slot i is claimed.  A pointer into this array is handed to each
 * kthread so it can recover its own slot index.
 */
static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];

SYSCTL_DECL(_vfs_nfs);

/* Maximum number of seconds a nfsiod kthread will sleep before exiting */
static unsigned int nfs_iodmaxidle = 120;
SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
    "Max number of seconds an nfsiod kthread will sleep before exiting");

/* Maximum number of nfsiod kthreads */
unsigned int nfs_iodmax = 20;

/* Minimum number of nfsiod kthreads to keep as spares */
static unsigned int nfs_iodmin = 0;
9519304Speter
9619304Speterstatic int
9719304Spetersysctl_iodmin(SYSCTL_HANDLER_ARGS)
9819304Speter{
9919304Speter	int error, i;
10019304Speter	int newmin;
101254225Speter
10219304Speter	newmin = nfs_iodmin;
10319304Speter	error = sysctl_handle_int(oidp, &newmin, 0, req);
10419304Speter	if (error || (req->newptr == NULL))
10519304Speter		return (error);
10619304Speter	mtx_lock(&nfs_iod_mtx);
10719304Speter	if (newmin > nfs_iodmax) {
108254225Speter		error = EINVAL;
10919304Speter		goto out;
11019304Speter	}
11119304Speter	nfs_iodmin = newmin;
11219304Speter	if (nfs_numasync >= nfs_iodmin)
11319304Speter		goto out;
114254225Speter	/*
115254225Speter	 * If the current number of nfsiod is lower
11619304Speter	 * than the new minimum, create some more.
11719304Speter	 */
11819304Speter	for (i = nfs_iodmin - nfs_numasync; i > 0; i--)
11919304Speter		nfs_nfsiodnew();
12019304Speterout:
12119304Speter	mtx_unlock(&nfs_iod_mtx);
12219304Speter	return (0);
12319304Speter}
12419304SpeterSYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
12519304Speter    sizeof (nfs_iodmin), sysctl_iodmin, "IU",
12619304Speter    "Min number of nfsiod kthreads to keep as spares");
12719304Speter
12819304Speter
12919304Speterstatic int
13019304Spetersysctl_iodmax(SYSCTL_HANDLER_ARGS)
13119304Speter{
132	int error, i;
133	int iod, newmax;
134
135	newmax = nfs_iodmax;
136	error = sysctl_handle_int(oidp, &newmax, 0, req);
137	if (error || (req->newptr == NULL))
138		return (error);
139	if (newmax > NFS_MAXASYNCDAEMON)
140		return (EINVAL);
141	mtx_lock(&nfs_iod_mtx);
142	nfs_iodmax = newmax;
143	if (nfs_numasync <= nfs_iodmax)
144		goto out;
145	/*
146	 * If there are some asleep nfsiods that should
147	 * exit, wakeup() them so that they check nfs_iodmax
148	 * and exit.  Those who are active will exit as
149	 * soon as they finish I/O.
150	 */
151	iod = nfs_numasync - 1;
152	for (i = 0; i < nfs_numasync - nfs_iodmax; i++) {
153		if (nfs_iodwant[iod])
154			wakeup(&nfs_iodwant[iod]);
155		iod--;
156	}
157out:
158	mtx_unlock(&nfs_iod_mtx);
159	return (0);
160}
161SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
162    sizeof (nfs_iodmax), sysctl_iodmax, "IU",
163    "Max number of nfsiod kthreads");
164
165int
166nfs_nfsiodnew(void)
167{
168	int error, i;
169	int newiod;
170
171	if (nfs_numasync >= nfs_iodmax)
172		return (-1);
173	newiod = -1;
174	for (i = 0; i < nfs_iodmax; i++)
175		if (nfs_asyncdaemon[i] == 0) {
176			nfs_asyncdaemon[i]++;
177			newiod = i;
178			break;
179		}
180	if (newiod == -1)
181		return (-1);
182	mtx_unlock(&nfs_iod_mtx);
183	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL, RFHIGHPID,
184	    0, "nfsiod %d", newiod);
185	mtx_lock(&nfs_iod_mtx);
186	if (error)
187		return (-1);
188	nfs_numasync++;
189	return (newiod);
190}
191
192static void
193nfsiod_setup(void *dummy)
194{
195	int i;
196	int error;
197
198	TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
199	mtx_lock(&nfs_iod_mtx);
200	/* Silently limit the start number of nfsiod's */
201	if (nfs_iodmin > NFS_MAXASYNCDAEMON)
202		nfs_iodmin = NFS_MAXASYNCDAEMON;
203
204	for (i = 0; i < nfs_iodmin; i++) {
205		error = nfs_nfsiodnew();
206		if (error == -1)
207			panic("nfsiod_setup: nfs_nfsiodnew failed");
208	}
209	mtx_unlock(&nfs_iod_mtx);
210}
211SYSINIT(nfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);
212
/*
 * When nonzero, an nfsiod serving a mount that has more than one iod
 * attached will "defect" after finishing a buffer, so iods get shared
 * out fairly between mounts (see nfssvc_iod()).
 */
static int nfs_defect = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
    "Allow nfsiods to migrate serving different mounts");
216
217/*
218 * Asynchronous I/O daemons for client nfs.
219 * They do read-ahead and write-behind operations on the block I/O cache.
220 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
221 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	mtx_lock(&nfs_iod_mtx);
	/* Recover our slot index from the pointer passed by nfs_nfsiodnew(). */
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
	    /* Sleep until a mount is bound to this iod and has queued bufs. */
	    while (((nmp = nfs_iodmount[myiod]) == NULL)
		   || !TAILQ_FIRST(&nmp->nm_bufq)) {
		/* Our slot may now be beyond a lowered nfs_iodmax; exit. */
		if (myiod >= nfs_iodmax)
			goto finish;
		if (nmp)
			nmp->nm_bufqiods--;
		nfs_iodwant[myiod] = curthread->td_proc;
		nfs_iodmount[myiod] = NULL;
		/*
		 * Always keep at least nfs_iodmin kthreads.
		 */
		/* timo == 0 means sleep with no timeout (a spare iod). */
		timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
		error = msleep(&nfs_iodwant[myiod], &nfs_iod_mtx, PWAIT | PCATCH,
		    "-", timo);
		if (error) {
			nmp = nfs_iodmount[myiod];
			/*
			 * Rechecking the nm_bufq closes a rare race where the
			 * nfsiod is woken up at the exact time the idle timeout
			 * fires
			 */
			if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
				error = 0;
			break;
		}
	    }
	    /* Nonzero error here is the idle timeout/signal: terminate. */
	    if (error)
		    break;
	    /* Drain this mount's buffer queue. */
	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
	        int giant_locked = 0;

		/* Take one off the front of the list */
		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen--;
		/* Wake producers throttled on a full queue. */
		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= nfs_numasync) {
		    nmp->nm_bufqwant = 0;
		    wakeup(&nmp->nm_bufq);
		}
		/* Drop the iod mutex across the actual I/O. */
		mtx_unlock(&nfs_iod_mtx);
		/* NOTE(review): NFSv4 paths apparently still need Giant here. */
		if (NFS_ISV4(bp->b_vp)) {
			giant_locked = 1;
			mtx_lock(&Giant);
		}
		if (bp->b_flags & B_DIRECT) {
			KASSERT((bp->b_iocmd == BIO_WRITE), ("nfscvs_iod: BIO_WRITE not set"));
			(void)nfs_doio_directwrite(bp);
		} else {
			if (bp->b_iocmd == BIO_READ)
				(void) nfs_doio(bp->b_vp, bp, bp->b_rcred, NULL);
			else
				(void) nfs_doio(bp->b_vp, bp, bp->b_wcred, NULL);
		}
		if (giant_locked)
			mtx_unlock(&Giant);
		mtx_lock(&nfs_iod_mtx);
		/*
		 * If there are more than one iod on this mount, then defect
		 * so that the iods can be shared out fairly between the mounts
		 */
		if (nfs_defect && nmp->nm_bufqiods > 1) {
		    NFS_DPF(ASYNCIO,
			    ("nfssvc_iod: iod %d defecting from mount %p\n",
			     myiod, nmp));
		    nfs_iodmount[myiod] = NULL;
		    nmp->nm_bufqiods--;
		    break;
		}
	    }
	}
finish:
	/* Tear down this iod's slot state; nfs_iod_mtx is still held. */
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
	    nmp->nm_bufqiods--;
	nfs_iodwant[myiod] = NULL;
	nfs_iodmount[myiod] = NULL;
	/* Someone may be waiting for the last nfsiod to terminate. */
	if (--nfs_numasync == 0)
		wakeup(&nfs_numasync);
	mtx_unlock(&nfs_iod_mtx);
	if ((error == 0) || (error == EWOULDBLOCK))
		kproc_exit(0);
	/* Abnormal termination */
	kproc_exit(1);
}
320