nfs_bio.c revision 143822
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 143822 2005-03-18 21:23:32Z das $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <rpc/rpcclnt.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#include <nfs4client/nfs4.h>

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
			      struct ucred *cred, int ioflag);

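/* Tunables controlling NFS direct I/O, defined elsewhere in the NFS client. */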
extern int nfs_directio_enable;
extern int nfs_directio_allow_mmap;
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if (!nfs_directio_allow_mmap && (np->n_flag & NNONCACHE) &&
	    (vp->v_type == VREG)) {
		printf("nfs_getpages: called on non-cacheable vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		/* We'll never get here for v4, because we always have fsinfo */
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		if (m->valid != 0) {
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vm_page_free(pages[i]);
			}
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			return(0);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
}

/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	if (!nfs_directio_allow_mmap && (np->n_flag & NNONCACHE) &&
	    (vp->v_type == VREG))
		printf("nfs_putpages: called on non-cacheable vnode??\n");

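	/*
	 * Preset every return slot to VM_PAGER_AGAIN; the slots covered
	 * by a successful write RPC are flipped to VM_PAGER_OK below.
	 */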
	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

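	/*
	 * Pageouts the pager does not require to be synchronous go out as
	 * UNSTABLE writes and are committed later; otherwise use FILESYNC.
	 */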
	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSV3WRITE_UNSTABLE;
	else
	    iomode = NFSV3WRITE_FILESYNC;

	error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

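	/*
	 * On success, undirty each page the RPC fully wrote and report it
	 * VM_PAGER_OK.  A must_commit reply means the server's write
	 * verifier changed, invalidating all outstanding commit state.
	 */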
	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching, no readaheads; just read the data into the user buffer. */
		return nfs_readrpc(vp, uio, cred);

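	/*
	 * seqcount scales the read-ahead depth with the sequential-access
	 * heuristic that the upper layers encode in the high bits of
	 * ioflag (IO_SEQSHIFT).
	 */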
	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if (np->n_flag & NMODIFIED) {
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			if (vp->v_type == VDIR)
				(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
	}
	do {
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 * The readahead is kicked off only if sequential access
		 * is detected, based on the readahead hint (ra_expect_lbn).
		 */
		if (nmp->nm_readahead > 0 && np->ra_expect_lbn == lbn) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		    np->ra_expect_lbn = lbn + 1;
		}

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */

again:
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
		if (bcount != biosize) {
			switch(nfs_rslock(np, td)) {
			case ENOLCK:
				goto again;
				/* not reached */
			case EIO:
				return (EIO);
			case EINTR:
			case ERESTART:
				return(EINTR);
				/* not reached */
			default:
				break;
			}
		}

		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (bcount != biosize)
			nfs_rsunlock(np, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
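	    /*
	     * Symlink targets are cached in a single NFS_MAXPATHLEN buffer
	     * at logical block 0; b_resid records how much of it the
	     * readlink RPC actually filled in.
	     */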
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = nfs_sigintr(nmp, NULL, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(vp, bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    }

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle a uio with more than one iovec, so we
 * break the write up accordingly (restricting each chunk to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed: the first from the user buffer
 * to a staging buffer and then a second from the staging buffer to mbufs.
 * This could be optimized by copying from the user buffer directly into
 * mbufs and passing the chain down, but that requires a fair amount of
 * re-working of the relevant codepaths (and can be done later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;

	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
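		/*
		 * Synchronous path: carve the request into chunks no larger
		 * than the mount's wsize and push each one as a FILESYNC
		 * write RPC, advancing the caller's uio by hand.
		 */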
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, nmp->nm_wsize);
			size = min(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSV3WRITE_FILESYNC;
			error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred,
						      &iomode, &must_commit);
			KASSERT((must_commit == 0),
				("nfs_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, nmp->nm_wsize);
			size = min(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&nfs_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			vhold(vp);
			error = nfs_asyncio(nmp, bp, NOCRED, td);
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				vdrop(bp->b_vp);
				bp->b_vp = NULL;
				relpbuf(bp, &nfs_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	int haverslock = 0;
	struct proc *p = td ? td->td_proc : NULL;

	GIANT_REQUIRED;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np, td)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EIO:
			return (EIO);
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p != NULL) {
		PROC_LOCK(p);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			if (haverslock)
				nfs_rsunlock(np, td);
			return (EFBIG);
		}
		PROC_UNLOCK(p);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;

	do {
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */

		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}

		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(vp, bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, td);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);
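
	/*
	 * On interruptible mounts sleep in getblk() with PCATCH so that a
	 * signal can break out of the wait, then poll nfs_sigintr() while
	 * retrying the allocation.
	 */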
	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		nfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		nfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");

	/*
	 * XXX This check stops us from needlessly doing a vinvalbuf when
	 * being called through vclean().  It is not clear that this is
	 * unsafe.
	 */
	if (vp->v_iflag & VI_DOOMED)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	if ((old_lock = VOP_ISLOCKED(vp, td)) != LK_EXCLUSIVE) {
		if (old_lock == LK_SHARED) {
			/* Upgrade to exclusive lock, this might block */
			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
		} else {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}

	/*
	 * Now, flush as required.
	 */
	error = vinvalbuf(vp, flags, td, slpflag, 0);
	while (error) {
		if (intrflg && (error = nfs_sigintr(nmp, NULL, td)))
			goto out;
		error = vinvalbuf(vp, flags, td, 0, slptimeo);
	}
	np->n_flag &= ~NMODIFIED;
out:
	if (old_lock != LK_EXCLUSIVE) {
		if (old_lock == LK_SHARED) {
			/* Downgrade from exclusive lock, this might block */
			vn_lock(vp, LK_DOWNGRADE, td);
		} else {
			VOP_UNLOCK(vp, 0, td);
		}
	}
	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < nfs_numasync; iod++)
		if (nfs_iodwant[iod]) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod) {
		iod = nfs_nfsiodnew();
		if (iod != -1)
			gotiod = TRUE;
	}

	if (gotiod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		nfs_iodwant[iod] = NULL;
		nfs_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&nfs_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = nfs_tsleep(td, &nmp->nm_bufq, slpflag | PRIBIO,
					   "nfsaio", slptimeo);
			if (error) {
				error2 = nfs_sigintr(nmp, NULL, td);
				if (error2)
					return (error2);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

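/*
 * nfsiod completion handler for the buffers queued by nfs_directio_write():
 * perform the FILESYNC write RPC described by b_caller1 and release the
 * staging uio, iovec, data buffer and pbuf.
 */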
void
nfs_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;
	struct nfsmount *nmp = VFSTONFS(bp->b_vp->v_mount);

	iomode = NFSV3WRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	(nmp->nm_rpcops->nr_writerpc)(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
	KASSERT((must_commit == 0), ("nfs_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	vdrop(bp->b_vp);
	bp->b_vp = NULL;
	relpbuf(bp, &nfs_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left  = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
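		/*
		 * If this vnode backs a running executable (VV_TEXT) and
		 * its modification time changed on the server, the in-core
		 * text is stale; kill the process rather than let it run
		 * corrupted instructions.
		 */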
		if (p && (vp->v_vflag & VV_TEXT) &&
		    (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.va_mtime))) {
			PROC_LOCK(p);
			killproc(p, "text file modification");
			PROC_UNLOCK(p);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
			error = nfs4_readdirrpc(vp, uiop, cr);
		else {
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
		}
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit.
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    retv = (nmp->nm_rpcops->nr_commit)(
				vp, off, bp->b_dirtyend-bp->b_dirtyoff,
				bp->b_wcred, td);
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write
	     */

	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 */
		if (error == EINTR || error == EIO
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	np->n_size = nsize;

	if (np->n_size < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return EINTR;
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return(error);
}
