nfs_clbio.c revision 306663
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/fs/nfsclient/nfs_clbio.c 306663 2016-10-03 23:17:57Z rmacklem $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

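/*
 * Note on the paging I/O paths below: ncl_getpages() and ncl_putpages()
 * borrow the KVA window of a pager buffer (getpbuf()/relpbuf()) to map
 * the physical pages contiguously, then run a single read or write RPC
 * over that mapping.  A minimal sketch of the shared pattern:
 *
 *	bp = getpbuf(&ncl_pbuf_freecnt);
 *	kva = (vm_offset_t) bp->b_data;
 *	pmap_qenter(kva, pages, npages);
 *	(build a UIO_SYSSPACE uio over kva and issue the RPC)
 *	pmap_qremove(kva, npages);
 *	relpbuf(bp, &ncl_pbuf_freecnt);
 */
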
/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("ncl_getpages: called with non-merged cache vnode\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			printf("ncl_getpages: called on non-cacheable vnode\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
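	/*
	 * Example: with 4 KB pages, a 5000-byte file ends inside page 1
	 * (file bytes 4096-4999), so at most the first 904 bytes of that
	 * page are ever marked valid; such a partially valid page is the
	 * case handled here.  (Illustrative numbers only.)
	 */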
	VM_OBJECT_WLOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	VM_OBJECT_WUNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("ncl_getpages: error %d\n", error);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not equal to requested, or
			 * page is zeroed and set valid by
			 * vm_pager_get_pages() for requested page.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		printf("ncl_putpages: called on non-cacheable vnode\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSWRITE_UNSTABLE;
	else
	    iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return (rtvals[0]);
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
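/*
 * Sketch of the checks performed below: if we modified the file
 * (NMODIFIED), flush cached directory buffers (for VDIR), force a
 * fresh GETATTR and record the server mtime.  Otherwise do a GETATTR
 * and, if the size or mtime changed on the server, invalidate the
 * cached data before recording the new mtime.
 */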
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/no readaheads.  Just read data into the user buffer. */
		return (ncl_readrpc(vp, uio, cred));

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
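	/*
	 * seqcount scales the caller's sequential-access hint (encoded in
	 * the upper bits of ioflag) to this mount's block size; e.g. when
	 * biosize equals BKVASIZE it is the hint itself.  It bounds the
	 * readahead issued below.  (Explanatory note; the numbers are
	 * illustrative only.)
	 */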

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return (error);

	do {
	    u_quad_t nsize;

	    mtx_lock(&np->n_mtx);
	    nsize = np->n_size;
	    mtx_unlock(&np->n_mtx);

	    switch (vp->v_type) {
	    case VREG:
		NFSINCRGLOBAL(newnfsstats.biocache_reads);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/* Note that bcount is *not* DEV_BSIZE aligned. */
		bcount = biosize;
		if ((off_t)lbn * biosize >= nsize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > nsize) {
			bcount = nsize - (off_t)lbn * biosize;
		}
		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = MIN((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = newnfs_sigintr(nmp, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = ncl_doio(vp, bp, cred, td, 0);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    }

	    if (n > 0) {
		    error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed: the first from the user
 * buffer to a staging buffer, and then a second from the staging
 * buffer into mbufs.  This could be optimized by copying from the user
 * buffer directly into mbufs and passing the chain down, but that
 * requires a fair amount of re-working of the relevant codepaths (and
 * can be done later).
 */
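/*
 * Chunking example (assuming wsize = 32 KB): a 100 KB uio goes out as
 * write RPCs of 32K, 32K, 32K and 4K; each iteration additionally
 * clips the chunk to the current iovec, i.e.
 * size = MIN(uio_resid, MIN(wsize, iov_len)).  (Numbers illustrative.)
 */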
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
				("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount, noncontig_write, obcount;
	int bp_cached, n, on, error = 0, error1, wouldcommit;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return (nfs_directio_write(vp, uio, cred, ioflag));

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	wouldcommit = 0;
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nflag & NMODIFIED) {
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
		}
	}

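	/*
	 * Main write loop: one buffer cache block per iteration.  If the
	 * running total of data that would need a commit (wouldcommit)
	 * would exceed nm_wcommitsize, all dirty buffers are flushed
	 * synchronously first, so the eventual commit RPC cannot grow
	 * without bound.
	 */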
	do {
		if (!(ioflag & IO_SYNC)) {
			wouldcommit += biosize;
			if (wouldcommit > nmp->nm_wcommitsize) {
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
				error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
				if (error)
					return (error);
				wouldcommit = biosize;
			}
		}

		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NHASBEENLOCKED) == 0 &&
		    (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
			noncontig_write = 1;
		else
			noncontig_write = 0;
		if ((uio->uio_offset == np->n_size ||
		    (noncontig_write != 0 &&
		    lbn == (np->n_size / biosize) &&
		    uio->uio_offset + n > np->n_size)) && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			obcount = np->n_size - (lbn * biosize);
			bp = nfs_getcacheblk(vp, lbn, obcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount = on + n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				if (noncontig_write != 0 && on > obcount)
					vfs_bio_bzero_buf(bp, obcount, on -
					    obcount);
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * If there has been a file lock applied to this file
		 * or vfs.nfs.old_noncontig_writing is set, do the following:
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * If vfs.nfs.old_noncontig_writing is not set and there has
		 * not been file locking done on this file:
		 * Relax coherency a bit for the sake of performance and
		 * expand the current dirty region to contain the new
		 * write even if it means we mark some non-dirty data as
		 * dirty.
		 */
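		/*
		 * Pictorially, within the buffer (contiguous case):
		 *
		 *	old dirty:  [b_dirtyoff ........ b_dirtyend)
		 *	new write:             [on ...... on+n)
		 *	merged:     [min(on, b_dirtyoff) .. max(on+n, b_dirtyend))
		 *
		 * The old region is pushed with bwrite() only when the two
		 * regions are disjoint and non-contiguous merging is not
		 * allowed.
		 */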

		if (noncontig_write == 0 && bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what
			 * was possibly written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error1 = bwrite(bp);
			if (error1 != 0) {
				if (error == 0)
					error = error1;
				break;
			}
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}

		if (error != 0)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (error != 0) {
		if (ioflag & IO_UNIT) {
			VATTR_NULL(&vattr);
			vattr.va_size = orig_size;
			/* IO_SYNC is handled implicitly */
			(void)VOP_SETATTR(vp, &vattr, cred);
			uio->uio_offset -= orig_resid - uio->uio_resid;
			uio->uio_resid = orig_resid;
		}
	}

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in ncl_write()) when extending a file past
 * its EOF.
 */
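/*
 * For interruptible mounts, the getblk() below sleeps with PCATCH and
 * can come back NULL; the loop then checks for a pending signal via
 * newnfs_sigintr() and retries with a 2*hz timeout, so a hung server
 * cannot leave the caller sleeping unkillably.
 */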
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
		intrflg = 1;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	if (NFSHASPNFS(nmp)) {
		nfscl_layoutcommit(vp, td);
		/*
		 * Invalidate the attribute cache, since writes to a DS
		 * won't update the size attribute.
		 */
		mtx_lock(&np->n_mtx);
		np->n_attrstamp = 0;
	} else
		mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
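/*
 * Callers treat a non-zero return as "do the I/O synchronously instead";
 * e.g. the readahead paths above just discard the buffer, and the async
 * direct-write path falls back to its synchronous loop.
 */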
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 *
	 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
	 * in the directory in order to update attributes. This can deadlock
	 * with another thread that is waiting for async I/O to be done by
	 * an nfsiod thread while holding a lock on one of these vnodes.
	 * To avoid this deadlock, don't allow the async nfsiod threads to
	 * perform Readdirplus RPCs.
	 */
	mtx_lock(&ncl_iod_mutex);
	if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	     (nmp->nm_bufqiods > ncl_numasync / 2)) ||
	    (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
		mtx_unlock(&ncl_iod_mutex);
		return (EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("ncl_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
			NFS_DPF(ASYNCIO,
				("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
				("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

void
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);

		mtx_lock(&np->n_mtx);
		if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) {
			/*
			 * Invalidate the attribute cache, since writes to a DS
			 * won't update the size attribute.
			 */
			np->n_attrstamp = 0;
		}
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t	iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * Clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		NFSINCRGLOBAL(newnfsstats.read_bios);
		error = ncl_readrpc(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			ssize_t left = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
		if (p && (vp->v_vflag & VV_TEXT)) {
			mtx_lock(&np->n_mtx);
			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
				mtx_unlock(&np->n_mtx);
				PROC_LOCK(p);
				killproc(p, "text file modification");
				PROC_UNLOCK(p);
			} else
				mtx_unlock(&np->n_mtx);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		NFSINCRGLOBAL(newnfsstats.readlink_bios);
		error = ncl_readlinkrpc(vp, uiop, cr);
		break;
	    case VDIR:
		NFSINCRGLOBAL(newnfsstats.readdir_bios);
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
			error = ncl_readdirplusrpc(vp, uiop, cr, td);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = ncl_readdirrpc(vp, uiop, cr, td);
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("ncl_doio:  type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    retv = ncl_commit(vp, off, bp->b_dirtyend - bp->b_dirtyoff,
			bp->b_wcred, td);
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    ncl_clearcommit(vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write
	     */
	    mtx_lock(&np->n_mtx);
	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
	    mtx_unlock(&np->n_mtx);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		NFSINCRGLOBAL(newnfsstats.write_bios);

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSWRITE_UNSTABLE;
		else
		    iomode = NFSWRITE_FILESYNC;

		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
		    called_from_strategy);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSWRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * EIO is returned by ncl_writerpc() to indicate a recoverable
		 * write error and is handled as above, except that
		 * B_EINTR isn't set. One cause of this is a stale stateid
		 * error for the RPC that indicates recovery is required,
		 * when called with called_from_strategy != 0.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 *
		 * The logic below breaks up errors into recoverable and
		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
		 * and keep the buffer around for potential write retries.
		 * For the latter (e.g. ESTALE), we toss the buffer away
		 * (B_INVAL) and save the error in the nfsnode. This is less
		 * than ideal but necessary. Keeping such buffers around
		 * could potentially cause buffer exhaustion eventually (they
		 * can never be written out, so they will constantly be
		 * re-dirtied). It also causes all sorts of vfs panics. For
		 * non-recoverable write errors, also invalidate the
		 * attrcache, so we'll be forced to go over the wire for this
		 * object, returning an error to user on next call (most of
		 * the time).
		 */
		if (error == EINTR || error == EIO || error == ETIMEDOUT
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if ((error == EINTR || error == ETIMEDOUT) &&
			    (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_flags |= B_INVAL;
			bp->b_error = np->n_error = error;
			mtx_lock(&np->n_mtx);
			np->n_flag |= NWRITEERR;
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			mtx_unlock(&np->n_mtx);
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

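/*
 * Worked example (assuming biosize = 8 KB): truncating a 20 KB file to
 * 12 KB leaves block 1 (bytes 8K-16K) straddling the new EOF.
 * vtruncbuf() trims the blocks wholly past 12 KB; the code below then
 * re-fetches block 1 at its new 4 KB size and clamps any dirty range
 * that now extends past b_bcount.
 */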
int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize - (lbn * biosize);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}