/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 230605 2012-01-27 02:46:12Z rmacklem $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>
#include <nfs/nfs_kdtrace.h>

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
			      struct ucred *cred, int ioflag);

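/*
 * Direct I/O policy knobs.  These are defined in the direct I/O support
 * code elsewhere in this client (and, if memory serves, exported as
 * vfs.nfs sysctls): the first enables IO_DIRECT transfers that bypass
 * the buffer cache, the second says whether a file that has gone
 * non-cacheable may still be accessed through mmap()ed pages.
 */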
extern int nfs_directio_enable;
extern int nfs_directio_allow_mmap;

/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		nfs_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (nfs_directio_enable && !nfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			nfs_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)nfs_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

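	/* btoc() rounds the byte count up to a whole number of pages. */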
	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

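	/*
	 * Describe the mapped pages with a single kernel-space iovec so
	 * the read RPC deposits the data straight into the pages backing
	 * this request.
	 */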
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		nfs_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.   We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED) {
					vm_page_lock(m);
					vm_page_activate(m);
					vm_page_unlock(m);
				} else {
					vm_page_lock(m);
					vm_page_deactivate(m);
					vm_page_unlock(m);
				}
				vm_page_wakeup(m);
			} else {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
			}
		}
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
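	/* IDX_TO_OFF() turns the first page's index into the starting byte offset. */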
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)nfs_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (nfs_directio_enable && !nfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		nfs_printf("nfs_putpages: called on non-cacheable vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

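	/*
	 * Pick the NFSv3 write mode: a FILESYNC write is durable once the
	 * RPC completes, while an UNSTABLE write lets the server cache the
	 * data and requires a later COMMIT.  If the server loses that
	 * cached data (its write verifier changes), the RPC layer reports
	 * must_commit and nfs_clearcommit() forces the affected dirty
	 * buffers to be rewritten.
	 */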
	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSV3WRITE_UNSTABLE;
	else
	    iomode = NFSV3WRITE_FILESYNC;

	error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return (rtvals[0]);
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = nfs_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		nfs_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	nfs_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	off_t end;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

	KASSERT(uio->uio_rw == UIO_READ, ("nfs_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)nfs_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	end = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (end > nmp->nm_maxfilesize || end < uio->uio_offset))
		return (EFBIG);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching, no readaheads: just read data into the user buffer. */
		return nfs_readrpc(vp, uio, cred);

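	/*
	 * The upper bits of ioflag carry the VFS layer's sequential-access
	 * heuristic; rescale it from BKVASIZE units into a number of NFS
	 * blocks of read-ahead.
	 */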
	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return (error);

	do {
	    u_quad_t nsize;

	    mtx_lock(&np->n_mtx);
	    nsize = np->n_size;
	    mtx_unlock(&np->n_mtx);

	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
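		/*
		 * Split the offset into a logical block number (lbn) and an
		 * offset within the block (on); the mask works because
		 * biosize is a power of two.
		 */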
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = nfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/* Note that bcount is *not* DEV_BSIZE aligned. */
		bcount = biosize;
		if ((off_t)lbn * biosize >= nsize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > nsize) {
			bcount = nsize - (off_t)lbn * biosize;
		}
		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (!bp) {
			error = nfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = nfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = nfs_sigintr(nmp, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = nfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(vp, bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		nfs_printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    }

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle a uio with more than one iovec per
 * RPC, so writes are broken up accordingly (and also restricted to
 * wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed. The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs. This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

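	/*
	 * Snapshot nm_wsize once, under the mount mutex, so the chunking
	 * below stays consistent even if the mount's write size changes
	 * concurrently.
	 */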
	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSV3WRITE_FILESYNC;
			error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred,
						      &iomode, &must_commit);
			KASSERT((must_commit == 0),
				("nfs_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&nfs_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = nfs_asyncio(nmp, bp, NOCRED, td);
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &nfs_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	off_t end;
	int bcount;
	int n, on, error = 0;

	KASSERT(uio->uio_rw == UIO_WRITE, ("nfs_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("nfs_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)nfs_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	end = uio->uio_offset + uio->uio_resid;
	if (end > nmp->nm_maxfilesize || end < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int nflag, needrestart = 0;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		nfsstats.biocache_writes++;
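		/*
		 * Split the offset into a block number and an offset within
		 * the block, and clamp n so this pass never crosses a block
		 * boundary.
		 */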
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = nfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and thus
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(vp, bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			nfs_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) (nmp->nm_rpcops->nr_writebp)(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

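	/*
	 * On an interruptible mount, sleep in getblk() with NFS_PCATCH so
	 * a signal can abort the wait, and poll nfs_sigintr() while
	 * retrying so a process stuck on an unresponsive server can still
	 * be killed.
	 */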
	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		nfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
		nfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
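	/*
	 * On an interruptible mount, sleep with NFS_PCATCH and bound each
	 * sleep at two seconds (2 * hz) so pending signals are noticed and
	 * a dead server cannot wedge the flush indefinitely.
	 */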
	if (intrflg) {
		slpflag = NFS_PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = nfs_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		nfs_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = nfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = nfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	nfs_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	mtx_lock(&nfs_iod_mtx);
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		mtx_unlock(&nfs_iod_mtx);
		return (EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = NFS_PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < nfs_numasync; iod++)
		if (nfs_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		nfs_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		nfs_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		nfs_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&nfs_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
		("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2 * nfs_numasync) {
			NFS_DPF(ASYNCIO,
		("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = nfs_msleep(td, &nmp->nm_bufq, &nfs_iod_mtx,
					   slpflag | PRIBIO,
					   "nfsaio", slptimeo);
			if (error) {
				error2 = nfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&nfs_iod_mtx);
					return (error2);
				}
				if (slpflag == NFS_PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&nfs_iod_mtx);
		return (0);
	}

	mtx_unlock(&nfs_iod_mtx);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

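/*
 * Completion side of the ASYNC direct write path: an nfsiod hands us the
 * pbuf queued by nfs_directio_write().  Push the staged uio to the server
 * as a FILESYNC write, free the staging memory, and wake anyone waiting
 * (via NFSYNCWAIT) for the outstanding direct writes to drain.
 */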
void
nfs_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;
	struct nfsmount *nmp = VFSTONFS(bp->b_vp->v_mount);

	iomode = NFSV3WRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	(nmp->nm_rpcops->nr_writerpc)(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
	KASSERT((must_commit == 0), ("nfs_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);
		mtx_lock(&np->n_mtx);
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			VTONFS(bp->b_vp)->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &nfs_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t	iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left  = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
		if (p && (vp->v_vflag & VV_TEXT)) {
			mtx_lock(&np->n_mtx);
			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.va_mtime)) {
				mtx_unlock(&np->n_mtx);
				PROC_LOCK(p);
				killproc(p, "text file modification");
				PROC_UNLOCK(p);
			} else
				mtx_unlock(&np->n_mtx);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
			error = nfs_readdirplusrpc(vp, uiop, cr);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, cr);
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		nfs_printf("nfs_doio:  type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    retv = (nmp->nm_rpcops->nr_commit)(
				vp, off, bp->b_dirtyend-bp->b_dirtyoff,
				bp->b_wcred, td);
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
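		    /*
		     * A stale write verifier means the server has rebooted
		     * since this data was written UNSTABLE; every
		     * uncommitted buffer on the mount must be rewritten,
		     * so throw away all B_NEEDCOMMIT state.
		     */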
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write
	     */
	    mtx_lock(&np->n_mtx);
	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
	    mtx_unlock(&np->n_mtx);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 *
		 * The logic below breaks up errors into recoverable and
		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
		 * and keep the buffer around for potential write retries.
		 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
		 * and save the error in the nfsnode. This is less than ideal
		 * but necessary. Keeping such buffers around could potentially
		 * cause buffer exhaustion eventually (they can never be written
		 * out, so they will constantly be re-dirtied). It also causes
		 * all sorts of vfs panics. For non-recoverable write errors,
		 * also invalidate the attrcache, so we'll be forced to go over
		 * the wire for this object, returning an error to user on next
		 * call (most of the time).
		 */
		if (error == EINTR || error == EIO || error == ETIMEDOUT
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_flags |= B_INVAL;
			bp->b_error = np->n_error = error;
			mtx_lock(&np->n_mtx);
			np->n_flag |= NWRITEERR;
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			mtx_unlock(&np->n_mtx);
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */
int
nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
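		/*
		 * lbn/bufsize name the block straddling the new EOF;
		 * re-fetch it and clip its dirty range so no dirty data
		 * survives past nsize.
		 */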
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}