nfs_bio.c revision 152656
1/*-
2 * Copyright (c) 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 152656 2005-11-21 19:23:46Z ps $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/bio.h>
41#include <sys/buf.h>
42#include <sys/kernel.h>
43#include <sys/mount.h>
44#include <sys/proc.h>
45#include <sys/resourcevar.h>
46#include <sys/signalvar.h>
47#include <sys/vmmeter.h>
48#include <sys/vnode.h>
49
50#include <vm/vm.h>
51#include <vm/vm_extern.h>
52#include <vm/vm_page.h>
53#include <vm/vm_object.h>
54#include <vm/vm_pager.h>
55#include <vm/vnode_pager.h>
56
57#include <rpc/rpcclnt.h>
58
59#include <nfs/rpcv2.h>
60#include <nfs/nfsproto.h>
61#include <nfsclient/nfs.h>
62#include <nfsclient/nfsmount.h>
63#include <nfsclient/nfsnode.h>
64
65#include <nfs4client/nfs4.h>
66
67static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
68		    struct thread *td);
69static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
70			      struct ucred *cred, int ioflag);
71
72extern int nfs_directio_enable;
73extern int nfs_directio_allow_mmap;
74/*
75 * Vnode op for VM getpages.
76 */
77int
78nfs_getpages(struct vop_getpages_args *ap)
79{
80	int i, error, nextoff, size, toff, count, npages;
81	struct uio uio;
82	struct iovec iov;
83	vm_offset_t kva;
84	struct buf *bp;
85	struct vnode *vp;
86	struct thread *td;
87	struct ucred *cred;
88	struct nfsmount *nmp;
89	vm_object_t object;
90	vm_page_t *pages;
91	struct nfsnode *np;
92
93	GIANT_REQUIRED;
94
95	vp = ap->a_vp;
96	np = VTONFS(vp);
97	td = curthread;				/* XXX */
98	cred = curthread->td_ucred;		/* XXX */
99	nmp = VFSTONFS(vp->v_mount);
100	pages = ap->a_m;
101	count = ap->a_count;
102
103	if ((object = vp->v_object) == NULL) {
104		printf("nfs_getpages: called with non-merged cache vnode??\n");
105		return VM_PAGER_ERROR;
106	}
107
108	if (!nfs_directio_allow_mmap && (np->n_flag & NNONCACHE) &&
109	    (vp->v_type == VREG)) {
110		printf("nfs_getpages: called on non-cacheable vnode??\n");
111		return VM_PAGER_ERROR;
112	}
113
114	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
115	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
116		/* We'll never get here for v4, because we always have fsinfo */
117		(void)nfs_fsinfo(nmp, vp, cred, td);
118	}
119
120	npages = btoc(count);
121
122	/*
123	 * If the requested page is partially valid, just return it and
124	 * allow the pager to zero-out the blanks.  Partially valid pages
125	 * can only occur at the file EOF.
126	 */
127
128	{
129		vm_page_t m = pages[ap->a_reqpage];
130
131		VM_OBJECT_LOCK(object);
132		vm_page_lock_queues();
133		if (m->valid != 0) {
134			/* handled by vm_fault now	  */
135			/* vm_page_zero_invalid(m, TRUE); */
136			for (i = 0; i < npages; ++i) {
137				if (i != ap->a_reqpage)
138					vm_page_free(pages[i]);
139			}
140			vm_page_unlock_queues();
141			VM_OBJECT_UNLOCK(object);
142			return(0);
143		}
144		vm_page_unlock_queues();
145		VM_OBJECT_UNLOCK(object);
146	}
147
148	/*
149	 * We use only the kva address for the buffer, but this is extremely
150	 * convenient and fast.
151	 */
152	bp = getpbuf(&nfs_pbuf_freecnt);
153
154	kva = (vm_offset_t) bp->b_data;
155	pmap_qenter(kva, pages, npages);
156	cnt.v_vnodein++;
157	cnt.v_vnodepgsin += npages;
158
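	/*
	 * Build a single SYSSPACE iovec covering the mapped pages so one
	 * read RPC can fill the entire requested range.
	 */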
159	iov.iov_base = (caddr_t) kva;
160	iov.iov_len = count;
161	uio.uio_iov = &iov;
162	uio.uio_iovcnt = 1;
163	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
164	uio.uio_resid = count;
165	uio.uio_segflg = UIO_SYSSPACE;
166	uio.uio_rw = UIO_READ;
167	uio.uio_td = td;
168
169	error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
170	pmap_qremove(kva, npages);
171
172	relpbuf(bp, &nfs_pbuf_freecnt);
173
174	if (error && (uio.uio_resid == count)) {
175		printf("nfs_getpages: error %d\n", error);
176		VM_OBJECT_LOCK(object);
177		vm_page_lock_queues();
178		for (i = 0; i < npages; ++i) {
179			if (i != ap->a_reqpage)
180				vm_page_free(pages[i]);
181		}
182		vm_page_unlock_queues();
183		VM_OBJECT_UNLOCK(object);
184		return VM_PAGER_ERROR;
185	}
186
187	/*
188	 * Calculate the number of bytes read and validate only that number
189	 * of bytes.  Note that due to pending writes, size may be 0.  This
190	 * does not mean that the remaining data is invalid!
191	 */
192
193	size = count - uio.uio_resid;
194	VM_OBJECT_LOCK(object);
195	vm_page_lock_queues();
196	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
197		vm_page_t m;
198		nextoff = toff + PAGE_SIZE;
199		m = pages[i];
200
201		if (nextoff <= size) {
202			/*
203			 * Read operation filled an entire page
204			 */
205			m->valid = VM_PAGE_BITS_ALL;
206			vm_page_undirty(m);
207		} else if (size > toff) {
208			/*
209			 * Read operation filled a partial page.
210			 */
211			m->valid = 0;
212			vm_page_set_validclean(m, 0, size - toff);
213			/* handled by vm_fault now	  */
214			/* vm_page_zero_invalid(m, TRUE); */
215		} else {
216			/*
217			 * Read operation was short.  If no error occurred
218			 * we may have hit a zero-fill section.  We simply
219			 * leave valid set to 0.
220			 */
221			;
222		}
223		if (i != ap->a_reqpage) {
224			/*
225			 * Whether or not to leave the page activated is up in
226			 * the air, but we should put the page on a page queue
227			 * somewhere (it already is in the object).
228			 * Empirical results show that deactivating the
229			 * pages works best.
230			 */
231
232			/*
233			 * Just in case someone was asking for this page we
234			 * now tell them that it is ok to use.
235			 */
236			if (!error) {
237				if (m->flags & PG_WANTED)
238					vm_page_activate(m);
239				else
240					vm_page_deactivate(m);
241				vm_page_wakeup(m);
242			} else {
243				vm_page_free(m);
244			}
245		}
246	}
247	vm_page_unlock_queues();
248	VM_OBJECT_UNLOCK(object);
249	return 0;
250}
251
252/*
253 * Vnode op for VM putpages.
254 */
255int
256nfs_putpages(struct vop_putpages_args *ap)
257{
258	struct uio uio;
259	struct iovec iov;
260	vm_offset_t kva;
261	struct buf *bp;
262	int iomode, must_commit, i, error, npages, count;
263	off_t offset;
264	int *rtvals;
265	struct vnode *vp;
266	struct thread *td;
267	struct ucred *cred;
268	struct nfsmount *nmp;
269	struct nfsnode *np;
270	vm_page_t *pages;
271
272	GIANT_REQUIRED;
273
274	vp = ap->a_vp;
275	np = VTONFS(vp);
276	td = curthread;				/* XXX */
277	cred = curthread->td_ucred;		/* XXX */
278	nmp = VFSTONFS(vp->v_mount);
279	pages = ap->a_m;
280	count = ap->a_count;
281	rtvals = ap->a_rtvals;
282	npages = btoc(count);
283	offset = IDX_TO_OFF(pages[0]->pindex);
284
285	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
286	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
287		(void)nfs_fsinfo(nmp, vp, cred, td);
288	}
289
290	if (!nfs_directio_allow_mmap && (np->n_flag & NNONCACHE) &&
291	    (vp->v_type == VREG))
292		printf("nfs_putpages: called on non-cacheable vnode??\n");
293
294	for (i = 0; i < npages; i++)
295		rtvals[i] = VM_PAGER_AGAIN;
296
297	/*
298	 * When putting pages, do not extend file past EOF.
299	 */
300
301	if (offset + count > np->n_size) {
302		count = np->n_size - offset;
303		if (count < 0)
304			count = 0;
305	}
306
307	/*
308	 * We use only the kva address for the buffer, but this is extremely
309	 * convenient and fast.
310	 */
311	bp = getpbuf(&nfs_pbuf_freecnt);
312
313	kva = (vm_offset_t) bp->b_data;
314	pmap_qenter(kva, pages, npages);
315	cnt.v_vnodeout++;
316	cnt.v_vnodepgsout += count;
317
318	iov.iov_base = (caddr_t) kva;
319	iov.iov_len = count;
320	uio.uio_iov = &iov;
321	uio.uio_iovcnt = 1;
322	uio.uio_offset = offset;
323	uio.uio_resid = count;
324	uio.uio_segflg = UIO_SYSSPACE;
325	uio.uio_rw = UIO_WRITE;
326	uio.uio_td = td;
327
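	/*
	 * A synchronous pageout forces a FILESYNC write so the data reaches
	 * stable storage before we return; otherwise an UNSTABLE write is
	 * sufficient.
	 */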
328	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
329	    iomode = NFSV3WRITE_UNSTABLE;
330	else
331	    iomode = NFSV3WRITE_FILESYNC;
332
333	error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);
334
335	pmap_qremove(kva, npages);
336	relpbuf(bp, &nfs_pbuf_freecnt);
337
338	if (!error) {
339		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
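		/*
		 * Every page touched by the write, even a short final page,
		 * is reported as written and marked clean; rounding up to a
		 * page boundary accounts for the partial case.
		 */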
340		for (i = 0; i < nwritten; i++) {
341			rtvals[i] = VM_PAGER_OK;
342			vm_page_undirty(pages[i]);
343		}
344		if (must_commit) {
345			nfs_clearcommit(vp->v_mount);
346		}
347	}
348	return rtvals[0];
349}
350
351/*
352 * Vnode op for read using bio
353 */
354int
355nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
356{
357	struct nfsnode *np = VTONFS(vp);
358	int biosize, i;
359	struct buf *bp, *rabp;
360	struct vattr vattr;
361	struct thread *td;
362	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
363	daddr_t lbn, rabn;
364	int bcount;
365	int seqcount;
366	int nra, error = 0, n = 0, on = 0;
367
368#ifdef DIAGNOSTIC
369	if (uio->uio_rw != UIO_READ)
370		panic("nfs_read mode");
371#endif
372	if (uio->uio_resid == 0)
373		return (0);
374	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
375		return (EINVAL);
376	td = uio->uio_td;
377
378	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
379	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
380		(void)nfs_fsinfo(nmp, vp, cred, td);
381	if (vp->v_type != VDIR &&
382	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
383		return (EFBIG);
384
385	if (nfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
386		/* No caching, no readaheads; just read data into the user buffer */
387		return nfs_readrpc(vp, uio, cred);
388
389	biosize = vp->v_mount->mnt_stat.f_iosize;
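	/*
	 * The sequential-access heuristic is carried in the upper bits of
	 * ioflag; scale it by the block size so seqcount below bounds how
	 * many blocks of read-ahead may be issued.
	 */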
390	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
391	/*
392	 * For nfs, cache consistency can only be maintained approximately.
393	 * Although RFC1094 does not specify the criteria, the following is
394	 * believed to be compatible with the reference port.
395	 * For nfs:
396	 * If the file's modify time on the server has changed since the
397	 * last read rpc or you have written to the file,
398	 * you may have lost data cache consistency with the
399	 * server, so flush all of the file's data out of the cache.
400	 * Then force a getattr rpc to ensure that you have up to date
401	 * attributes.
402	 * NB: This implies that cache data can be read when up to
403	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
404	 * attributes this could be forced by setting n_attrstamp to 0 before
405	 * the VOP_GETATTR() call.
406	 */
407	if (np->n_flag & NMODIFIED) {
408		if (vp->v_type != VREG) {
409			if (vp->v_type != VDIR)
410				panic("nfs: bioread, not dir");
411			(nmp->nm_rpcops->nr_invaldir)(vp);
412			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
413			if (error)
414				return (error);
415		}
416		np->n_attrstamp = 0;
417		error = VOP_GETATTR(vp, &vattr, cred, td);
418		if (error)
419			return (error);
420		np->n_mtime = vattr.va_mtime;
421	} else {
422		error = VOP_GETATTR(vp, &vattr, cred, td);
423		if (error)
424			return (error);
425		if ((np->n_flag & NSIZECHANGED)
426		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
427			if (vp->v_type == VDIR)
428				(nmp->nm_rpcops->nr_invaldir)(vp);
429			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
430			if (error)
431				return (error);
432			np->n_mtime = vattr.va_mtime;
433			np->n_flag &= ~NSIZECHANGED;
434		}
435	}
436	do {
437	    switch (vp->v_type) {
438	    case VREG:
439		nfsstats.biocache_reads++;
440		lbn = uio->uio_offset / biosize;
441		on = uio->uio_offset & (biosize - 1);
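		/*
		 * lbn is the logical block holding the current offset and
		 * "on" is the offset within it; e.g. with an 8K biosize and
		 * uio_offset 20000, lbn is 2 and on is 3616.
		 */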
442
443		/*
444		 * Start the read ahead(s), as required.
445		 * The readahead is kicked off only if sequential access
446		 * is detected, based on the readahead hint (ra_expect_lbn).
447		 */
448		if (nmp->nm_readahead > 0 && np->ra_expect_lbn == lbn) {
449		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
450			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
451			rabn = lbn + 1 + nra;
452			if (incore(&vp->v_bufobj, rabn) == NULL) {
453			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
454			    if (!rabp) {
455				error = nfs_sigintr(nmp, NULL, td);
456				return (error ? error : EINTR);
457			    }
458			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
459				rabp->b_flags |= B_ASYNC;
460				rabp->b_iocmd = BIO_READ;
461				vfs_busy_pages(rabp, 0);
462				if (nfs_asyncio(nmp, rabp, cred, td)) {
463				    rabp->b_flags |= B_INVAL;
464				    rabp->b_ioflags |= BIO_ERROR;
465				    vfs_unbusy_pages(rabp);
466				    brelse(rabp);
467				    break;
468				}
469			    } else {
470				brelse(rabp);
471			    }
472			}
473		    }
474		    np->ra_expect_lbn = lbn + 1;
475		}
476
477		/* Note that bcount is *not* DEV_BSIZE aligned. */
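		/*
		 * Trim bcount so the buffer never extends past EOF; a bcount
		 * of zero means the read starts at or beyond EOF.
		 */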
478		bcount = biosize;
479		if ((off_t)lbn * biosize >= np->n_size) {
480			bcount = 0;
481		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
482			bcount = np->n_size - (off_t)lbn * biosize;
483		}
484		bp = nfs_getcacheblk(vp, lbn, bcount, td);
485
486		if (!bp) {
487			error = nfs_sigintr(nmp, NULL, td);
488			return (error ? error : EINTR);
489		}
490
491		/*
492		 * If B_CACHE is not set, we must issue the read.  If this
493		 * fails, we return an error.
494		 */
495
496		if ((bp->b_flags & B_CACHE) == 0) {
497		    bp->b_iocmd = BIO_READ;
498		    vfs_busy_pages(bp, 0);
499		    error = nfs_doio(vp, bp, cred, td);
500		    if (error) {
501			brelse(bp);
502			return (error);
503		    }
504		}
505
506		/*
507		 * on is the offset into the current bp.  Figure out how many
508		 * bytes we can copy out of the bp.  Note that bcount is
509		 * NOT DEV_BSIZE aligned.
510		 *
511		 * Then figure out how many bytes we can copy into the uio.
512		 */
513
514		n = 0;
515		if (on < bcount)
516			n = min((unsigned)(bcount - on), uio->uio_resid);
517		break;
518	    case VLNK:
519		nfsstats.biocache_readlinks++;
520		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
521		if (!bp) {
522			error = nfs_sigintr(nmp, NULL, td);
523			return (error ? error : EINTR);
524		}
525		if ((bp->b_flags & B_CACHE) == 0) {
526		    bp->b_iocmd = BIO_READ;
527		    vfs_busy_pages(bp, 0);
528		    error = nfs_doio(vp, bp, cred, td);
529		    if (error) {
530			bp->b_ioflags |= BIO_ERROR;
531			brelse(bp);
532			return (error);
533		    }
534		}
535		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
536		on = 0;
537		break;
538	    case VDIR:
539		nfsstats.biocache_readdirs++;
540		if (np->n_direofoffset
541		    && uio->uio_offset >= np->n_direofoffset) {
542		    return (0);
543		}
544		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
545		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
546		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
547		if (!bp) {
548		    error = nfs_sigintr(nmp, NULL, td);
549		    return (error ? error : EINTR);
550		}
551		if ((bp->b_flags & B_CACHE) == 0) {
552		    bp->b_iocmd = BIO_READ;
553		    vfs_busy_pages(bp, 0);
554		    error = nfs_doio(vp, bp, cred, td);
555		    if (error) {
556			    brelse(bp);
557		    }
558		    while (error == NFSERR_BAD_COOKIE) {
559			(nmp->nm_rpcops->nr_invaldir)(vp);
560			error = nfs_vinvalbuf(vp, 0, td, 1);
561			/*
562			 * Yuck! The directory has been modified on the
563			 * server. The only way to get the block is by
564			 * reading from the beginning to get all the
565			 * offset cookies.
566			 *
567			 * Leave the last bp intact unless there is an error.
568			 * Loop back up to the while if the error is another
569			 * NFSERR_BAD_COOKIE (double yuck!).
570			 */
571			for (i = 0; i <= lbn && !error; i++) {
572			    if (np->n_direofoffset
573				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
574				    return (0);
575			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
576			    if (!bp) {
577				error = nfs_sigintr(nmp, NULL, td);
578				return (error ? error : EINTR);
579			    }
580			    if ((bp->b_flags & B_CACHE) == 0) {
581				    bp->b_iocmd = BIO_READ;
582				    vfs_busy_pages(bp, 0);
583				    error = nfs_doio(vp, bp, cred, td);
584				    /*
585				     * no error + B_INVAL == directory EOF,
586				     * use the block.
587				     */
588				    if (error == 0 && (bp->b_flags & B_INVAL))
589					    break;
590			    }
591			    /*
592			     * An error will throw away the block and the
593			     * for loop will break out.  If no error and this
594			     * is not the block we want, we throw away the
595			     * block and go for the next one via the for loop.
596			     */
597			    if (error || i < lbn)
598				    brelse(bp);
599			}
600		    }
601		    /*
602		     * The above while is repeated if we hit another cookie
603		     * error.  If we hit an error and it wasn't a cookie error,
604		     * we give up.
605		     */
606		    if (error)
607			    return (error);
608		}
609
610		/*
611		 * If not eof and read aheads are enabled, start one.
612		 * (You need the current block first, so that you have the
613		 *  directory offset cookie of the next block.)
614		 */
615		if (nmp->nm_readahead > 0 &&
616		    (bp->b_flags & B_INVAL) == 0 &&
617		    (np->n_direofoffset == 0 ||
618		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
619		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
620			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
621			if (rabp) {
622			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
623				rabp->b_flags |= B_ASYNC;
624				rabp->b_iocmd = BIO_READ;
625				vfs_busy_pages(rabp, 0);
626				if (nfs_asyncio(nmp, rabp, cred, td)) {
627				    rabp->b_flags |= B_INVAL;
628				    rabp->b_ioflags |= BIO_ERROR;
629				    vfs_unbusy_pages(rabp);
630				    brelse(rabp);
631				}
632			    } else {
633				brelse(rabp);
634			    }
635			}
636		}
637		/*
638		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
639		 * chopped for the EOF condition, we cannot tell how large
640		 * NFS directories are going to be until we hit EOF.  So
641		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
642		 * it just so happens that b_resid will effectively chop it
643		 * to EOF.  *BUT* this information is lost if the buffer goes
644		 * away and is reconstituted into a B_CACHE state ( due to
645		 * being VMIO ) later.  So we keep track of the directory eof
646		 * in np->n_direofoffset and chop it off as an extra step
647		 * right here.
648		 */
649		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
650		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
651			n = np->n_direofoffset - uio->uio_offset;
652		break;
653	    default:
654		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
655		bp = NULL;
656		break;
657	    }
658
659	    if (n > 0) {
660		    error = uiomove(bp->b_data + on, (int)n, uio);
661	    }
662	    if (vp->v_type == VLNK)
663		n = 0;
664	    if (bp != NULL)
665		brelse(bp);
666	} while (error == 0 && uio->uio_resid > 0 && n > 0);
667	return (error);
668}
669
670/*
671 * The NFS write path cannot handle more than one iovec per request, so we
672 * break up the uio accordingly (restricting each chunk to wsize).
673 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
674 * For the ASYNC case, 2 copies are needed. The first a copy from the
675 * user buffer to a staging buffer and then a second copy from the staging
676 * buffer to mbufs. This can be optimized by copying from the user buffer
677 * directly into mbufs and passing the chain down, but that requires a
678 * fair amount of re-working of the relevant codepaths (and can be done
679 * later).
680 */
681static int
682nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
683    int ioflag)
684{
688	int error;
689	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
690	struct thread *td = uiop->uio_td;
691	int size;
692
693	if (ioflag & IO_SYNC) {
694		int iomode, must_commit;
695		struct uio uio;
696		struct iovec iov;
697do_sync:
698		while (uiop->uio_resid > 0) {
699			size = min(uiop->uio_resid, nmp->nm_wsize);
700			size = min(uiop->uio_iov->iov_len, size);
701			iov.iov_base = uiop->uio_iov->iov_base;
702			iov.iov_len = size;
703			uio.uio_iov = &iov;
704			uio.uio_iovcnt = 1;
705			uio.uio_offset = uiop->uio_offset;
706			uio.uio_resid = size;
707			uio.uio_segflg = UIO_USERSPACE;
708			uio.uio_rw = UIO_WRITE;
709			uio.uio_td = td;
710			iomode = NFSV3WRITE_FILESYNC;
711			error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred,
712						      &iomode, &must_commit);
713			KASSERT((must_commit == 0),
714				("nfs_directio_write: Did not commit write"));
715			if (error)
716				return (error);
717			uiop->uio_offset += size;
718			uiop->uio_resid -= size;
719			if (uiop->uio_iov->iov_len <= size) {
720				uiop->uio_iovcnt--;
721				uiop->uio_iov++;
722			} else {
723				uiop->uio_iov->iov_base =
724					(char *)uiop->uio_iov->iov_base + size;
725				uiop->uio_iov->iov_len -= size;
726			}
727		}
728	} else {
729		struct uio *t_uio;
730		struct iovec *t_iov;
731		struct buf *bp;
732
733		/*
734		 * Break up the write into blocksize chunks and hand these
735		 * over to nfsiod's for write back.
736		 * over to nfsiods for write back.
737		 * Unfortunately, this incurs a copy of the data, since
738		 * the user could modify the buffer before the write is
739		 * initiated.
740		 * The obvious optimization here is that one of the 2 copies
741		 * in the async write path can be eliminated by copying the
742		 * data here directly into mbufs and passing the mbuf chain
743		 * down. But that will require a fair amount of re-working
744		 * of the code and can be done if there's enough interest
745		 * in NFS directio access.
746		 */
747		while (uiop->uio_resid > 0) {
748			size = min(uiop->uio_resid, nmp->nm_wsize);
749			size = min(uiop->uio_iov->iov_len, size);
750			bp = getpbuf(&nfs_pbuf_freecnt);
751			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
752			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
753			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
754			t_iov->iov_len = size;
755			t_uio->uio_iov = t_iov;
756			t_uio->uio_iovcnt = 1;
757			t_uio->uio_offset = uiop->uio_offset;
758			t_uio->uio_resid = size;
759			t_uio->uio_segflg = UIO_SYSSPACE;
760			t_uio->uio_rw = UIO_WRITE;
761			t_uio->uio_td = td;
762			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
763			bp->b_flags |= B_DIRECT;
764			bp->b_iocmd = BIO_WRITE;
765			if (cred != NOCRED) {
766				crhold(cred);
767				bp->b_wcred = cred;
768			} else
769				bp->b_wcred = NOCRED;
770			bp->b_caller1 = (void *)t_uio;
771			bp->b_vp = vp;
772			vhold(vp);
773			error = nfs_asyncio(nmp, bp, NOCRED, td);
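			/*
			 * If no nfsiod could take the request, tear down the
			 * staging buffer and fall back to the synchronous
			 * path for whatever data remains (unless we were
			 * interrupted).
			 */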
774			if (error) {
775				free(t_iov->iov_base, M_NFSDIRECTIO);
776				free(t_iov, M_NFSDIRECTIO);
777				free(t_uio, M_NFSDIRECTIO);
778				vdrop(bp->b_vp);
779				bp->b_vp = NULL;
780				relpbuf(bp, &nfs_pbuf_freecnt);
781				if (error == EINTR)
782					return (error);
783				goto do_sync;
784			}
785			uiop->uio_offset += size;
786			uiop->uio_resid -= size;
787			if (uiop->uio_iov->iov_len <= size) {
788				uiop->uio_iovcnt--;
789				uiop->uio_iov++;
790			} else {
791				uiop->uio_iov->iov_base =
792					(char *)uiop->uio_iov->iov_base + size;
793				uiop->uio_iov->iov_len -= size;
794			}
795		}
796	}
797	return (0);
798}
799
800/*
801 * Vnode op for write using bio
802 */
803int
804nfs_write(struct vop_write_args *ap)
805{
806	int biosize;
807	struct uio *uio = ap->a_uio;
808	struct thread *td = uio->uio_td;
809	struct vnode *vp = ap->a_vp;
810	struct nfsnode *np = VTONFS(vp);
811	struct ucred *cred = ap->a_cred;
812	int ioflag = ap->a_ioflag;
813	struct buf *bp;
814	struct vattr vattr;
815	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
816	daddr_t lbn;
817	int bcount;
818	int n, on, error = 0;
819	struct proc *p = td ? td->td_proc : NULL;
820
821	GIANT_REQUIRED;
822
823#ifdef DIAGNOSTIC
824	if (uio->uio_rw != UIO_WRITE)
825		panic("nfs_write mode");
826	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
827		panic("nfs_write proc");
828#endif
829	if (vp->v_type != VREG)
830		return (EIO);
831	if (np->n_flag & NWRITEERR) {
832		np->n_flag &= ~NWRITEERR;
833		return (np->n_error);
834	}
835	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
836	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
837		(void)nfs_fsinfo(nmp, vp, cred, td);
838
839	/*
840	 * Synchronously flush pending buffers if we are in synchronous
841	 * mode or if we are appending.
842	 */
843	if (ioflag & (IO_APPEND | IO_SYNC)) {
844		if (np->n_flag & NMODIFIED) {
845#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
846			/*
847			 * Require non-blocking, synchronous writes to
848			 * dirty files to inform the program it needs
849			 * to fsync(2) explicitly.
850			 */
851			if (ioflag & IO_NDELAY)
852				return (EAGAIN);
853#endif
854flush_and_restart:
855			np->n_attrstamp = 0;
856			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
857			if (error)
858				return (error);
859		}
860	}
861
862	/*
863	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
864	 * get the append lock.
865	 */
866	if (ioflag & IO_APPEND) {
867		np->n_attrstamp = 0;
868		error = VOP_GETATTR(vp, &vattr, cred, td);
869		if (error)
870			return (error);
871		uio->uio_offset = np->n_size;
872	}
873
874	if (uio->uio_offset < 0)
875		return (EINVAL);
876	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
877		return (EFBIG);
878	if (uio->uio_resid == 0)
879		return (0);
880
881	if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
882		return nfs_directio_write(vp, uio, cred, ioflag);
883
884	/*
885	 * Maybe this should be above the vnode op call, but so long as
886	 * file servers have no limits, I don't think it matters
887	 */
888	if (p != NULL) {
889		PROC_LOCK(p);
890		if (uio->uio_offset + uio->uio_resid >
891		    lim_cur(p, RLIMIT_FSIZE)) {
892			psignal(p, SIGXFSZ);
893			PROC_UNLOCK(p);
894			return (EFBIG);
895		}
896		PROC_UNLOCK(p);
897	}
898
899	biosize = vp->v_mount->mnt_stat.f_iosize;
900	/*
901	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
902	 * would exceed the local maximum per-file write commit size when
903	 * combined with those, we must decide whether to flush,
904	 * go synchronous, or return error.  We don't bother checking
905	 * IO_UNIT -- we just make all writes atomic anyway, as there's
906	 * no point optimizing for something that really won't ever happen.
907	 */
908	if (!(ioflag & IO_SYNC)) {
909		int needrestart = 0;
910		if (nmp->nm_wcommitsize < uio->uio_resid) {
911			/*
912			 * If this request could not possibly be completed
913			 * without exceeding the maximum outstanding write
914			 * commit size, see if we can convert it into a
915			 * synchronous write operation.
916			 */
917			if (ioflag & IO_NDELAY)
918				return (EAGAIN);
919			ioflag |= IO_SYNC;
920			if (np->n_flag & NMODIFIED)
921				needrestart = 1;
922		} else if (np->n_flag & NMODIFIED) {
923			int wouldcommit = 0;
924			BO_LOCK(&vp->v_bufobj);
925			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
926				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
927				    b_bobufs) {
928					if (bp->b_flags & B_NEEDCOMMIT)
929						wouldcommit += bp->b_bcount;
930				}
931			}
932			BO_UNLOCK(&vp->v_bufobj);
933			/*
934			 * Since we're not operating synchronously and
935			 * bypassing the buffer cache, we are in a commit
936			 * and holding all of these buffers whether
937			 * transmitted or not.  If not limited, this
938			 * will lead to the buffer cache deadlocking,
939			 * as no one else can flush our uncommitted buffers.
940			 */
941			wouldcommit += uio->uio_resid;
942			/*
943			 * If we would initially exceed the maximum
944			 * outstanding write commit size, flush and restart.
945			 */
946			if (wouldcommit > nmp->nm_wcommitsize)
947				needrestart = 1;
948		}
949		if (needrestart)
950			goto flush_and_restart;
951	}
952
953	do {
954		nfsstats.biocache_writes++;
955		lbn = uio->uio_offset / biosize;
956		on = uio->uio_offset & (biosize-1);
957		n = min((unsigned)(biosize - on), uio->uio_resid);
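		/*
		 * n is clamped so a single pass never crosses a block
		 * boundary; larger writes are carved into per-block chunks
		 * by this loop.
		 */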
958again:
959		/*
960		 * Handle direct append and file extension cases, calculate
961		 * unaligned buffer size.
962		 */
963
964		if (uio->uio_offset == np->n_size && n) {
965			/*
966			 * Get the buffer (in its pre-append state to maintain
967			 * B_CACHE if it was previously set).  Resize the
968			 * nfsnode after we have locked the buffer to prevent
969			 * readers from reading garbage.
970			 */
971			bcount = on;
972			bp = nfs_getcacheblk(vp, lbn, bcount, td);
973
974			if (bp != NULL) {
975				long save;
976
977				np->n_size = uio->uio_offset + n;
978				np->n_flag |= NMODIFIED;
979				vnode_pager_setsize(vp, np->n_size);
980
981				save = bp->b_flags & B_CACHE;
982				bcount += n;
983				allocbuf(bp, bcount);
984				bp->b_flags |= save;
985			}
986		} else {
987			/*
988			 * Obtain the locked cache block first, and then
989			 * adjust the file's size as appropriate.
990			 */
991			bcount = on + n;
992			if ((off_t)lbn * biosize + bcount < np->n_size) {
993				if ((off_t)(lbn + 1) * biosize < np->n_size)
994					bcount = biosize;
995				else
996					bcount = np->n_size - (off_t)lbn * biosize;
997			}
998			bp = nfs_getcacheblk(vp, lbn, bcount, td);
999			if (uio->uio_offset + n > np->n_size) {
1000				np->n_size = uio->uio_offset + n;
1001				np->n_flag |= NMODIFIED;
1002				vnode_pager_setsize(vp, np->n_size);
1003			}
1004		}
1005
1006		if (!bp) {
1007			error = nfs_sigintr(nmp, NULL, td);
1008			if (!error)
1009				error = EINTR;
1010			break;
1011		}
1012
1013		/*
1014		 * Issue a READ if B_CACHE is not set.  In special-append
1015		 * mode, B_CACHE is based on the buffer prior to the write
1016		 * op and is typically set, avoiding the read.  If a read
1017		 * is required in special append mode, the server will
1018		 * probably send us a short-read since we extended the file
1019		 * on our end, resulting in b_resid == 0 and, thusly,
1020		 * B_CACHE getting set.
1021		 *
1022		 * We can also avoid issuing the read if the write covers
1023		 * the entire buffer.  We have to make sure the buffer state
1024		 * is reasonable in this case since we will not be initiating
1025		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
1026		 * more information.
1027		 *
1028		 * B_CACHE may also be set due to the buffer being cached
1029		 * normally.
1030		 */
1031
1032		if (on == 0 && n == bcount) {
1033			bp->b_flags |= B_CACHE;
1034			bp->b_flags &= ~B_INVAL;
1035			bp->b_ioflags &= ~BIO_ERROR;
1036		}
1037
1038		if ((bp->b_flags & B_CACHE) == 0) {
1039			bp->b_iocmd = BIO_READ;
1040			vfs_busy_pages(bp, 0);
1041			error = nfs_doio(vp, bp, cred, td);
1042			if (error) {
1043				brelse(bp);
1044				break;
1045			}
1046		}
1047		if (bp->b_wcred == NOCRED)
1048			bp->b_wcred = crhold(cred);
1049		np->n_flag |= NMODIFIED;
1050
1051		/*
1052		 * If dirtyend exceeds file size, chop it down.  This should
1053		 * not normally occur but there is an append race where it
1054		 * might occur XXX, so we log it.
1055		 *
1056		 * If the chopping creates a reverse-indexed or degenerate
1057		 * situation with dirtyoff/end, we 0 both of them.
1058		 */
1059
1060		if (bp->b_dirtyend > bcount) {
1061			printf("NFS append race @%lx:%d\n",
1062			    (long)bp->b_blkno * DEV_BSIZE,
1063			    bp->b_dirtyend - bcount);
1064			bp->b_dirtyend = bcount;
1065		}
1066
1067		if (bp->b_dirtyoff >= bp->b_dirtyend)
1068			bp->b_dirtyoff = bp->b_dirtyend = 0;
1069
1070		/*
1071		 * If the new write will leave a contiguous dirty
1072		 * area, just update the b_dirtyoff and b_dirtyend,
1073		 * otherwise force a write rpc of the old dirty area.
1074		 *
1075		 * While it is possible to merge discontiguous writes due to
1076		 * our having a B_CACHE buffer ( and thus valid read data
1077		 * for the hole), we don't because it could lead to
1078		 * significant cache coherency problems with multiple clients,
1079		 * especially if locking is implemented later on.
1080		 *
1081		 * As an optimization we could theoretically maintain
1082		 * a linked list of discontinuous areas, but we would still
1083		 * have to commit them separately so there isn't much
1084		 * advantage to it except perhaps a bit of asynchronization.
1085		 */
1086
1087		if (bp->b_dirtyend > 0 &&
1088		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1089			if (bwrite(bp) == EINTR) {
1090				error = EINTR;
1091				break;
1092			}
1093			goto again;
1094		}
1095
1096		error = uiomove((char *)bp->b_data + on, n, uio);
1097
1098		/*
1099		 * Since this block is being modified, it must be written
1100		 * again and not just committed.  Since write clustering does
1101		 * not work for the stage 1 data write, only the stage 2
1102		 * commit rpc, we have to clear B_CLUSTEROK as well.
1103		 */
1104		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1105
1106		if (error) {
1107			bp->b_ioflags |= BIO_ERROR;
1108			brelse(bp);
1109			break;
1110		}
1111
1112		/*
1113		 * Only update dirtyoff/dirtyend if not a degenerate
1114		 * condition.
1115		 */
1116		if (n) {
1117			if (bp->b_dirtyend > 0) {
1118				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1119				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1120			} else {
1121				bp->b_dirtyoff = on;
1122				bp->b_dirtyend = on + n;
1123			}
1124			vfs_bio_set_validclean(bp, on, n);
1125		}
1126
1127		/*
1128		 * If IO_SYNC do bwrite().
1129		 *
1130		 * IO_INVAL appears to be unused.  The idea appears to be
1131		 * to turn off caching in this case.  Very odd.  XXX
1132		 */
1133		if ((ioflag & IO_SYNC)) {
1134			if (ioflag & IO_INVAL)
1135				bp->b_flags |= B_NOCACHE;
1136			error = bwrite(bp);
1137			if (error)
1138				break;
1139		} else if ((n + on) == biosize) {
1140			bp->b_flags |= B_ASYNC;
1141			(void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
1142		} else {
1143			bdwrite(bp);
1144		}
1145	} while (uio->uio_resid > 0 && n > 0);
1146
1147	return (error);
1148}
1149
1150/*
1151 * Get an nfs cache block.
1152 *
1153 * Allocate a new one if the block isn't currently in the cache
1154 * and return the block marked busy. If the calling process is
1155 * interrupted by a signal for an interruptible mount point, return
1156 * NULL.
1157 *
1158 * The caller must carefully deal with the possible B_INVAL state of
1159 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
1160 * indirectly), so synchronous reads can be issued without worrying about
1161 * the B_INVAL state.  We have to be a little more careful when dealing
1162 * with writes (see comments in nfs_write()) when extending a file past
1163 * its EOF.
1164 */
1165static struct buf *
1166nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1167{
1168	struct buf *bp;
1169	struct mount *mp;
1170	struct nfsmount *nmp;
1171
1172	mp = vp->v_mount;
1173	nmp = VFSTONFS(mp);
1174
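	/*
	 * On interruptible mounts, sleep for the buffer with PCATCH under
	 * the NFS signal mask; if getblk() is interrupted, keep retrying
	 * with a bounded timeout until a fatal signal is pending.
	 */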
1175	if (nmp->nm_flag & NFSMNT_INT) {
1176 		sigset_t oldset;
1177
1178 		nfs_set_sigmask(td, &oldset);
1179		bp = getblk(vp, bn, size, PCATCH, 0, 0);
1180 		nfs_restore_sigmask(td, &oldset);
1181		while (bp == NULL) {
1182			if (nfs_sigintr(nmp, NULL, td))
1183				return (NULL);
1184			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1185		}
1186	} else {
1187		bp = getblk(vp, bn, size, 0, 0, 0);
1188	}
1189
1190	if (vp->v_type == VREG) {
1191		int biosize;
1192
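		/*
		 * Express the logical block number in DEV_BSIZE units, which
		 * is what nfs_doio() expects when it reconstructs the byte
		 * offset from b_blkno.
		 */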
1193		biosize = mp->mnt_stat.f_iosize;
1194		bp->b_blkno = bn * (biosize / DEV_BSIZE);
1195	}
1196	return (bp);
1197}
1198
1199/*
1200 * Flush and invalidate all dirty buffers. If another process is already
1201 * doing the flush, just wait for completion.
1202 */
1203int
1204nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1205{
1206	struct nfsnode *np = VTONFS(vp);
1207	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1208	int error = 0, slpflag, slptimeo;
1209 	int old_lock = 0;
1210
1211	ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");
1212
1213	/*
1214	 * XXX This check stops us from needlessly doing a vinvalbuf when
1215	 * being called through vclean().  It is not clear that this is
1216	 * unsafe.
1217	 */
1218	if (vp->v_iflag & VI_DOOMED)
1219		return (0);
1220
1221	if ((nmp->nm_flag & NFSMNT_INT) == 0)
1222		intrflg = 0;
1223	if (intrflg) {
1224		slpflag = PCATCH;
1225		slptimeo = 2 * hz;
1226	} else {
1227		slpflag = 0;
1228		slptimeo = 0;
1229	}
1230
1231 	if ((old_lock = VOP_ISLOCKED(vp, td)) != LK_EXCLUSIVE) {
1232 		if (old_lock == LK_SHARED) {
1233 			/* Upgrade to exclusive lock, this might block */
1234 			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
1235 		} else {
1236 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1237 		}
1238  	}
1239
1240	/*
1241	 * Now, flush as required.
1242	 */
1243	error = vinvalbuf(vp, flags, td, slpflag, 0);
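	/*
	 * If the first attempt fails, keep retrying without PCATCH but with
	 * a timeout; on an interruptible mount a pending fatal signal
	 * terminates the loop.
	 */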
1244	while (error) {
1245		if (intrflg && (error = nfs_sigintr(nmp, NULL, td)))
1246			goto out;
1247		error = vinvalbuf(vp, flags, td, 0, slptimeo);
1248	}
1249	np->n_flag &= ~NMODIFIED;
1250out:
1251 	if (old_lock != LK_EXCLUSIVE) {
1252 		if (old_lock == LK_SHARED) {
1253 			/* Downgrade from exclusive lock, this might block */
1254 			vn_lock(vp, LK_DOWNGRADE, td);
1255 		} else {
1256 			VOP_UNLOCK(vp, 0, td);
1257 		}
1258  	}
1259	return error;
1260}
1261
1262/*
1263 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1264 * This is mainly to avoid queueing async I/O requests when the nfsiods
1265 * are all hung on a dead server.
1266 *
1267 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1268 * is eventually dequeued by the async daemon, nfs_doio() *will*.
1269 */
1270int
1271nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1272{
1273	int iod;
1274	int gotiod;
1275	int slpflag = 0;
1276	int slptimeo = 0;
1277	int error, error2;
1278
1279	/*
1280	 * Commits are usually short and sweet so lets save some cpu and
1281	 * Commits are usually short and sweet, so let's save some CPU and
1282	 * and writes).
1283	 */
1284	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1285	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
1286		return(EIO);
1287	}
1288
1289again:
1290	if (nmp->nm_flag & NFSMNT_INT)
1291		slpflag = PCATCH;
1292	gotiod = FALSE;
1293
1294	/*
1295	 * Find a free iod to process this request.
1296	 */
1297	for (iod = 0; iod < nfs_numasync; iod++)
1298		if (nfs_iodwant[iod]) {
1299			gotiod = TRUE;
1300			break;
1301		}
1302
1303	/*
1304	 * Try to create one if none are free.
1305	 */
1306	if (!gotiod) {
1307		iod = nfs_nfsiodnew();
1308		if (iod != -1)
1309			gotiod = TRUE;
1310	}
1311
1312	if (gotiod) {
1313		/*
1314		 * Found one, so wake it up and tell it which
1315		 * mount to process.
1316		 */
1317		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
1318		    iod, nmp));
1319		nfs_iodwant[iod] = NULL;
1320		nfs_iodmount[iod] = nmp;
1321		nmp->nm_bufqiods++;
1322		wakeup(&nfs_iodwant[iod]);
1323	}
1324
1325	/*
1326	 * If none are free, we may already have an iod working on this mount
1327	 * point.  If so, it will process our request.
1328	 */
1329	if (!gotiod) {
1330		if (nmp->nm_bufqiods > 0) {
1331			NFS_DPF(ASYNCIO,
1332				("nfs_asyncio: %d iods are already processing mount %p\n",
1333				 nmp->nm_bufqiods, nmp));
1334			gotiod = TRUE;
1335		}
1336	}
1337
1338	/*
1339	 * If we have an iod which can process the request, then queue
1340	 * the buffer.
1341	 */
1342	if (gotiod) {
1343		/*
1344		 * Ensure that the queue never grows too large.  We still want
1345		 * to asynchronize so we block rather than return EIO.
1346		 */
1347		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
1348			NFS_DPF(ASYNCIO,
1349				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
1350			nmp->nm_bufqwant = TRUE;
1351 			error = nfs_tsleep(td, &nmp->nm_bufq, slpflag | PRIBIO,
1352 					   "nfsaio", slptimeo);
1353			if (error) {
1354				error2 = nfs_sigintr(nmp, NULL, td);
1355				if (error2)
1356					return (error2);
1357				if (slpflag == PCATCH) {
1358					slpflag = 0;
1359					slptimeo = 2 * hz;
1360				}
1361			}
1362			/*
1363			 * We might have lost our iod while sleeping,
1364			 * so check and loop if necessary.
1365			 */
1366			if (nmp->nm_bufqiods == 0) {
1367				NFS_DPF(ASYNCIO,
1368					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1369				goto again;
1370			}
1371		}
1372
1373		if (bp->b_iocmd == BIO_READ) {
1374			if (bp->b_rcred == NOCRED && cred != NOCRED)
1375				bp->b_rcred = crhold(cred);
1376		} else {
1377			if (bp->b_wcred == NOCRED && cred != NOCRED)
1378				bp->b_wcred = crhold(cred);
1379		}
1380
1381		if (bp->b_flags & B_REMFREE)
1382			bremfreef(bp);
1383		BUF_KERNPROC(bp);
1384		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1385		nmp->nm_bufqlen++;
1386		return (0);
1387	}
1388
1389	/*
1390	 * All the iods are busy on other mounts, so return EIO to
1391	 * force the caller to process the i/o synchronously.
1392	 */
1393	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
1394	return (EIO);
1395}
1396
1397void
1398nfs_doio_directwrite(struct buf *bp)
1399{
1400	int iomode, must_commit;
1401	struct uio *uiop = (struct uio *)bp->b_caller1;
1402	char *iov_base = uiop->uio_iov->iov_base;
1403	struct nfsmount *nmp = VFSTONFS(bp->b_vp->v_mount);
1404
1405	iomode = NFSV3WRITE_FILESYNC;
1406	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1407	(nmp->nm_rpcops->nr_writerpc)(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
1408	KASSERT((must_commit == 0), ("nfs_doio_directwrite: Did not commit write"));
1409	free(iov_base, M_NFSDIRECTIO);
1410	free(uiop->uio_iov, M_NFSDIRECTIO);
1411	free(uiop, M_NFSDIRECTIO);
1412	vdrop(bp->b_vp);
1413	bp->b_vp = NULL;
1414	relpbuf(bp, &nfs_pbuf_freecnt);
1415}
1416
1417/*
1418 * Do an I/O operation to/from a cache block. This may be called
1419 * synchronously or from an nfsiod.
1420 */
1421int
1422nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
1423{
1424	struct uio *uiop;
1425	struct nfsnode *np;
1426	struct nfsmount *nmp;
1427	int error = 0, iomode, must_commit = 0;
1428	struct uio uio;
1429	struct iovec io;
1430	struct proc *p = td ? td->td_proc : NULL;
1431
1432	np = VTONFS(vp);
1433	nmp = VFSTONFS(vp->v_mount);
1434	uiop = &uio;
1435	uiop->uio_iov = &io;
1436	uiop->uio_iovcnt = 1;
1437	uiop->uio_segflg = UIO_SYSSPACE;
1438	uiop->uio_td = td;
1439
1440	/*
1441	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
1442	 * do this here so we do not have to do it in all the code that
1443	 * calls us.
1444	 */
1445	bp->b_flags &= ~B_INVAL;
1446	bp->b_ioflags &= ~BIO_ERROR;
1447
1448	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));
1449
1450	if (bp->b_iocmd == BIO_READ) {
1451	    io.iov_len = uiop->uio_resid = bp->b_bcount;
1452	    io.iov_base = bp->b_data;
1453	    uiop->uio_rw = UIO_READ;
1454
1455	    switch (vp->v_type) {
1456	    case VREG:
1457		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1458		nfsstats.read_bios++;
1459		error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);
1460
1461		if (!error) {
1462		    if (uiop->uio_resid) {
1463			/*
1464			 * If we had a short read with no error, we must have
1465			 * hit a file hole.  We should zero-fill the remainder.
1466			 * This can also occur if the server hits the file EOF.
1467			 *
1468			 * Holes used to be able to occur due to pending
1469			 * writes, but that is not possible any longer.
1470			 */
1471			int nread = bp->b_bcount - uiop->uio_resid;
1472			int left  = uiop->uio_resid;
1473
1474			if (left > 0)
1475				bzero((char *)bp->b_data + nread, left);
1476			uiop->uio_resid = 0;
1477		    }
1478		}
1479		/* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
1480		if (p && (vp->v_vflag & VV_TEXT) &&
1481		    (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.va_mtime))) {
1482			PROC_LOCK(p);
1483			killproc(p, "text file modification");
1484			PROC_UNLOCK(p);
1485		}
1486		break;
1487	    case VLNK:
1488		uiop->uio_offset = (off_t)0;
1489		nfsstats.readlink_bios++;
1490		error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
1491		break;
1492	    case VDIR:
1493		nfsstats.readdir_bios++;
1494		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1495		if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
1496			error = nfs4_readdirrpc(vp, uiop, cr);
1497		else {
1498			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1499				error = nfs_readdirplusrpc(vp, uiop, cr);
1500				if (error == NFSERR_NOTSUPP)
1501					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1502			}
1503			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1504				error = nfs_readdirrpc(vp, uiop, cr);
1505		}
1506		/*
1507		 * end-of-directory sets B_INVAL but does not generate an
1508		 * error.
1509		 */
1510		if (error == 0 && uiop->uio_resid == bp->b_bcount)
1511			bp->b_flags |= B_INVAL;
1512		break;
1513	    default:
1514		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
1515		break;
1516	    }
1517	    if (error) {
1518		bp->b_ioflags |= BIO_ERROR;
1519		bp->b_error = error;
1520	    }
1521	} else {
1522	    /*
1523	     * If we only need to commit, try to commit
1524	     */
1525	    if (bp->b_flags & B_NEEDCOMMIT) {
1526		    int retv;
1527		    off_t off;
1528
1529		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1530		    retv = (nmp->nm_rpcops->nr_commit)(
1531				vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1532				bp->b_wcred, td);
1533		    if (retv == 0) {
1534			    bp->b_dirtyoff = bp->b_dirtyend = 0;
1535			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1536			    bp->b_resid = 0;
1537			    bufdone(bp);
1538			    return (0);
1539		    }
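		    /*
		     * A stale write verifier means the server has rebooted
		     * since this data was written; clear the commit state
		     * for the whole mount so those buffers are written
		     * again instead of merely committed.
		     */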
1540		    if (retv == NFSERR_STALEWRITEVERF) {
1541			    nfs_clearcommit(vp->v_mount);
1542		    }
1543	    }
1544
1545	    /*
1546	     * Setup for actual write
1547	     */
1548
1549	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1550		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1551
1552	    if (bp->b_dirtyend > bp->b_dirtyoff) {
1553		io.iov_len = uiop->uio_resid = bp->b_dirtyend
1554		    - bp->b_dirtyoff;
1555		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1556		    + bp->b_dirtyoff;
1557		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1558		uiop->uio_rw = UIO_WRITE;
1559		nfsstats.write_bios++;
1560
1561		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1562		    iomode = NFSV3WRITE_UNSTABLE;
1563		else
1564		    iomode = NFSV3WRITE_FILESYNC;
1565
1566		error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);
1567
1568		/*
1569		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1570		 * to cluster the buffers needing commit.  This will allow
1571		 * the system to submit a single commit rpc for the whole
1572		 * cluster.  We can do this even if the buffer is not 100%
1573		 * dirty (relative to the NFS blocksize), so we optimize the
1574		 * append-to-file-case.
1575		 *
1576		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1577		 * cleared because write clustering only works for commit
1578		 * rpc's, not for the data portion of the write).
1579		 */
1580
1581		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
1582		    bp->b_flags |= B_NEEDCOMMIT;
1583		    if (bp->b_dirtyoff == 0
1584			&& bp->b_dirtyend == bp->b_bcount)
1585			bp->b_flags |= B_CLUSTEROK;
1586		} else {
1587		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1588		}
1589
1590		/*
1591		 * For an interrupted write, the buffer is still valid
1592		 * and the write hasn't been pushed to the server yet,
1593		 * so we can't set BIO_ERROR and report the interruption
1594		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1595		 * is not relevant, so the rpc attempt is essentially
1596		 * a noop.  For the case of a V3 write rpc not being
1597		 * committed to stable storage, the block is still
1598		 * dirty and requires either a commit rpc or another
1599		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1600		 * the block is reused. This is indicated by setting
1601		 * the B_DELWRI and B_NEEDCOMMIT flags.
1602		 *
1603		 * If the buffer is marked B_PAGING, it does not reside on
1604		 * the vp's paging queues so we cannot call bdirty().  The
1605		 * bp in this case is not an NFS cache block so we should
1606		 * be safe. XXX
1607		 */
1608    		if (error == EINTR || error == EIO || error == ETIMEDOUT
1609		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1610			int s;
1611
1612			s = splbio();
1613			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1614			if ((bp->b_flags & B_PAGING) == 0) {
1615			    bdirty(bp);
1616			    bp->b_flags &= ~B_DONE;
1617			}
1618			if (error && (bp->b_flags & B_ASYNC) == 0)
1619			    bp->b_flags |= B_EINTR;
1620			splx(s);
1621	    	} else {
1622		    if (error) {
1623			bp->b_ioflags |= BIO_ERROR;
1624			bp->b_error = np->n_error = error;
1625			np->n_flag |= NWRITEERR;
1626		    }
1627		    bp->b_dirtyoff = bp->b_dirtyend = 0;
1628		}
1629	    } else {
1630		bp->b_resid = 0;
1631		bufdone(bp);
1632		return (0);
1633	    }
1634	}
1635	bp->b_resid = uiop->uio_resid;
1636	if (must_commit)
1637	    nfs_clearcommit(vp->v_mount);
1638	bufdone(bp);
1639	return (error);
1640}
1641
1642/*
1643 * Used to aid in handling ftruncate() operations on the NFS client side.
1644 * Truncation creates a number of special problems for NFS.  We have to
1645 * throw away VM pages and buffer cache buffers that are beyond EOF, and
1646 * we have to properly handle VM pages or (potentially dirty) buffers
1647 * that straddle the truncation point.
1648 */
1649
1650int
1651nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1652{
1653	struct nfsnode *np = VTONFS(vp);
1654	u_quad_t tsize = np->n_size;
1655	int biosize = vp->v_mount->mnt_stat.f_iosize;
1656	int error = 0;
1657
1658	np->n_size = nsize;
1659
1660	if (np->n_size < tsize) {
1661		struct buf *bp;
1662		daddr_t lbn;
1663		int bufsize;
1664
1665		/*
1666		 * vtruncbuf() doesn't get the buffer overlapping the
1667		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
1668		 * buffer that now needs to be truncated.
1669		 */
1670		error = vtruncbuf(vp, cred, td, nsize, biosize);
1671		lbn = nsize / biosize;
1672		bufsize = nsize & (biosize - 1);
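		/*
		 * lbn/bufsize name the block straddling the new EOF;
		 * re-fetching it at the truncated size discards the stale
		 * tail, and any dirty range is clamped to the new size below.
		 */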
1673		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1674 		if (!bp)
1675 			return EINTR;
1676		if (bp->b_dirtyoff > bp->b_bcount)
1677			bp->b_dirtyoff = bp->b_bcount;
1678		if (bp->b_dirtyend > bp->b_bcount)
1679			bp->b_dirtyend = bp->b_bcount;
1680		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
1681		brelse(bp);
1682	} else {
1683		vnode_pager_setsize(vp, nsize);
1684	}
1685	return(error);
1686}
1687
1688