nfs_bio.c revision 207746
1/*- 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95 33 */ 34 35#include <sys/cdefs.h> 36__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 207746 2010-05-07 15:49:43Z alc $"); 37 38#include "opt_kdtrace.h" 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/bio.h> 43#include <sys/buf.h> 44#include <sys/kernel.h> 45#include <sys/mbuf.h> 46#include <sys/mount.h> 47#include <sys/proc.h> 48#include <sys/vmmeter.h> 49#include <sys/vnode.h> 50 51#include <vm/vm.h> 52#include <vm/vm_extern.h> 53#include <vm/vm_page.h> 54#include <vm/vm_object.h> 55#include <vm/vm_pager.h> 56#include <vm/vnode_pager.h> 57 58#include <nfs/nfsproto.h> 59#include <nfsclient/nfs.h> 60#include <nfsclient/nfsmount.h> 61#include <nfsclient/nfsnode.h> 62#include <nfsclient/nfs_kdtrace.h> 63 64static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, 65 struct thread *td); 66static int nfs_directio_write(struct vnode *vp, struct uio *uiop, 67 struct ucred *cred, int ioflag); 68 69extern int nfs_directio_enable; 70extern int nfs_directio_allow_mmap; 71 72/* 73 * Vnode op for VM getpages. 
74 */ 75int 76nfs_getpages(struct vop_getpages_args *ap) 77{ 78 int i, error, nextoff, size, toff, count, npages; 79 struct uio uio; 80 struct iovec iov; 81 vm_offset_t kva; 82 struct buf *bp; 83 struct vnode *vp; 84 struct thread *td; 85 struct ucred *cred; 86 struct nfsmount *nmp; 87 vm_object_t object; 88 vm_page_t *pages; 89 struct nfsnode *np; 90 91 vp = ap->a_vp; 92 np = VTONFS(vp); 93 td = curthread; /* XXX */ 94 cred = curthread->td_ucred; /* XXX */ 95 nmp = VFSTONFS(vp->v_mount); 96 pages = ap->a_m; 97 count = ap->a_count; 98 99 if ((object = vp->v_object) == NULL) { 100 nfs_printf("nfs_getpages: called with non-merged cache vnode??\n"); 101 return (VM_PAGER_ERROR); 102 } 103 104 if (nfs_directio_enable && !nfs_directio_allow_mmap) { 105 mtx_lock(&np->n_mtx); 106 if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { 107 mtx_unlock(&np->n_mtx); 108 nfs_printf("nfs_getpages: called on non-cacheable vnode??\n"); 109 return (VM_PAGER_ERROR); 110 } else 111 mtx_unlock(&np->n_mtx); 112 } 113 114 mtx_lock(&nmp->nm_mtx); 115 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 116 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 117 mtx_unlock(&nmp->nm_mtx); 118 /* We'll never get here for v4, because we always have fsinfo */ 119 (void)nfs_fsinfo(nmp, vp, cred, td); 120 } else 121 mtx_unlock(&nmp->nm_mtx); 122 123 npages = btoc(count); 124 125 /* 126 * If the requested page is partially valid, just return it and 127 * allow the pager to zero-out the blanks. Partially valid pages 128 * can only occur at the file EOF. 129 */ 130 VM_OBJECT_LOCK(object); 131 if (pages[ap->a_reqpage]->valid != 0) { 132 for (i = 0; i < npages; ++i) { 133 if (i != ap->a_reqpage) { 134 vm_page_lock(pages[i]); 135 vm_page_free(pages[i]); 136 vm_page_unlock(pages[i]); 137 } 138 } 139 VM_OBJECT_UNLOCK(object); 140 return (0); 141 } 142 VM_OBJECT_UNLOCK(object); 143 144 /* 145 * We use only the kva address for the buffer, but this is extremely 146 * convienient and fast. 147 */ 148 bp = getpbuf(&nfs_pbuf_freecnt); 149 150 kva = (vm_offset_t) bp->b_data; 151 pmap_qenter(kva, pages, npages); 152 PCPU_INC(cnt.v_vnodein); 153 PCPU_ADD(cnt.v_vnodepgsin, npages); 154 155 iov.iov_base = (caddr_t) kva; 156 iov.iov_len = count; 157 uio.uio_iov = &iov; 158 uio.uio_iovcnt = 1; 159 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); 160 uio.uio_resid = count; 161 uio.uio_segflg = UIO_SYSSPACE; 162 uio.uio_rw = UIO_READ; 163 uio.uio_td = td; 164 165 error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred); 166 pmap_qremove(kva, npages); 167 168 relpbuf(bp, &nfs_pbuf_freecnt); 169 170 if (error && (uio.uio_resid == count)) { 171 nfs_printf("nfs_getpages: error %d\n", error); 172 VM_OBJECT_LOCK(object); 173 for (i = 0; i < npages; ++i) { 174 if (i != ap->a_reqpage) { 175 vm_page_lock(pages[i]); 176 vm_page_free(pages[i]); 177 vm_page_unlock(pages[i]); 178 } 179 } 180 VM_OBJECT_UNLOCK(object); 181 return (VM_PAGER_ERROR); 182 } 183 184 /* 185 * Calculate the number of bytes read and validate only that number 186 * of bytes. Note that due to pending writes, size may be 0. This 187 * does not mean that the remaining data is invalid! 
188 */ 189 190 size = count - uio.uio_resid; 191 VM_OBJECT_LOCK(object); 192 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { 193 vm_page_t m; 194 nextoff = toff + PAGE_SIZE; 195 m = pages[i]; 196 197 if (nextoff <= size) { 198 /* 199 * Read operation filled an entire page 200 */ 201 m->valid = VM_PAGE_BITS_ALL; 202 KASSERT(m->dirty == 0, 203 ("nfs_getpages: page %p is dirty", m)); 204 } else if (size > toff) { 205 /* 206 * Read operation filled a partial page. 207 */ 208 m->valid = 0; 209 vm_page_set_valid(m, 0, size - toff); 210 KASSERT(m->dirty == 0, 211 ("nfs_getpages: page %p is dirty", m)); 212 } else { 213 /* 214 * Read operation was short. If no error occured 215 * we may have hit a zero-fill section. We simply 216 * leave valid set to 0. 217 */ 218 ; 219 } 220 if (i != ap->a_reqpage) { 221 /* 222 * Whether or not to leave the page activated is up in 223 * the air, but we should put the page on a page queue 224 * somewhere (it already is in the object). Result: 225 * It appears that emperical results show that 226 * deactivating pages is best. 227 */ 228 229 /* 230 * Just in case someone was asking for this page we 231 * now tell them that it is ok to use. 232 */ 233 if (!error) { 234 if (m->oflags & VPO_WANTED) { 235 vm_page_lock(m); 236 vm_page_activate(m); 237 vm_page_unlock(m); 238 } else { 239 vm_page_lock(m); 240 vm_page_deactivate(m); 241 vm_page_unlock(m); 242 } 243 vm_page_wakeup(m); 244 } else { 245 vm_page_lock(m); 246 vm_page_free(m); 247 vm_page_unlock(m); 248 } 249 } 250 } 251 VM_OBJECT_UNLOCK(object); 252 return (0); 253} 254 255/* 256 * Vnode op for VM putpages. 257 */ 258int 259nfs_putpages(struct vop_putpages_args *ap) 260{ 261 struct uio uio; 262 struct iovec iov; 263 vm_offset_t kva; 264 struct buf *bp; 265 int iomode, must_commit, i, error, npages, count; 266 off_t offset; 267 int *rtvals; 268 struct vnode *vp; 269 struct thread *td; 270 struct ucred *cred; 271 struct nfsmount *nmp; 272 struct nfsnode *np; 273 vm_page_t *pages; 274 275 vp = ap->a_vp; 276 np = VTONFS(vp); 277 td = curthread; /* XXX */ 278 cred = curthread->td_ucred; /* XXX */ 279 nmp = VFSTONFS(vp->v_mount); 280 pages = ap->a_m; 281 count = ap->a_count; 282 rtvals = ap->a_rtvals; 283 npages = btoc(count); 284 offset = IDX_TO_OFF(pages[0]->pindex); 285 286 mtx_lock(&nmp->nm_mtx); 287 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 288 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 289 mtx_unlock(&nmp->nm_mtx); 290 (void)nfs_fsinfo(nmp, vp, cred, td); 291 } else 292 mtx_unlock(&nmp->nm_mtx); 293 294 mtx_lock(&np->n_mtx); 295 if (nfs_directio_enable && !nfs_directio_allow_mmap && 296 (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { 297 mtx_unlock(&np->n_mtx); 298 nfs_printf("nfs_putpages: called on noncache-able vnode??\n"); 299 mtx_lock(&np->n_mtx); 300 } 301 302 for (i = 0; i < npages; i++) 303 rtvals[i] = VM_PAGER_AGAIN; 304 305 /* 306 * When putting pages, do not extend file past EOF. 307 */ 308 if (offset + count > np->n_size) { 309 count = np->n_size - offset; 310 if (count < 0) 311 count = 0; 312 } 313 mtx_unlock(&np->n_mtx); 314 315 /* 316 * We use only the kva address for the buffer, but this is extremely 317 * convienient and fast. 
318 */ 319 bp = getpbuf(&nfs_pbuf_freecnt); 320 321 kva = (vm_offset_t) bp->b_data; 322 pmap_qenter(kva, pages, npages); 323 PCPU_INC(cnt.v_vnodeout); 324 PCPU_ADD(cnt.v_vnodepgsout, count); 325 326 iov.iov_base = (caddr_t) kva; 327 iov.iov_len = count; 328 uio.uio_iov = &iov; 329 uio.uio_iovcnt = 1; 330 uio.uio_offset = offset; 331 uio.uio_resid = count; 332 uio.uio_segflg = UIO_SYSSPACE; 333 uio.uio_rw = UIO_WRITE; 334 uio.uio_td = td; 335 336 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0) 337 iomode = NFSV3WRITE_UNSTABLE; 338 else 339 iomode = NFSV3WRITE_FILESYNC; 340 341 error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit); 342 343 pmap_qremove(kva, npages); 344 relpbuf(bp, &nfs_pbuf_freecnt); 345 346 if (!error) { 347 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE; 348 for (i = 0; i < nwritten; i++) { 349 rtvals[i] = VM_PAGER_OK; 350 vm_page_undirty(pages[i]); 351 } 352 if (must_commit) { 353 nfs_clearcommit(vp->v_mount); 354 } 355 } 356 return rtvals[0]; 357} 358 359/* 360 * For nfs, cache consistency can only be maintained approximately. 361 * Although RFC1094 does not specify the criteria, the following is 362 * believed to be compatible with the reference port. 363 * For nfs: 364 * If the file's modify time on the server has changed since the 365 * last read rpc or you have written to the file, 366 * you may have lost data cache consistency with the 367 * server, so flush all of the file's data out of the cache. 368 * Then force a getattr rpc to ensure that you have up to date 369 * attributes. 370 * NB: This implies that cache data can be read when up to 371 * NFS_ATTRTIMEO seconds out of date. If you find that you need current 372 * attributes this could be forced by setting n_attrstamp to 0 before 373 * the VOP_GETATTR() call. 374 */ 375static inline int 376nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred) 377{ 378 int error = 0; 379 struct vattr vattr; 380 struct nfsnode *np = VTONFS(vp); 381 int old_lock; 382 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 383 384 /* 385 * Grab the exclusive lock before checking whether the cache is 386 * consistent. 387 * XXX - We can make this cheaper later (by acquiring cheaper locks). 388 * But for now, this suffices. 
389 */ 390 old_lock = nfs_upgrade_vnlock(vp); 391 if (vp->v_iflag & VI_DOOMED) { 392 nfs_downgrade_vnlock(vp, old_lock); 393 return (EBADF); 394 } 395 396 mtx_lock(&np->n_mtx); 397 if (np->n_flag & NMODIFIED) { 398 mtx_unlock(&np->n_mtx); 399 if (vp->v_type != VREG) { 400 if (vp->v_type != VDIR) 401 panic("nfs: bioread, not dir"); 402 (nmp->nm_rpcops->nr_invaldir)(vp); 403 error = nfs_vinvalbuf(vp, V_SAVE, td, 1); 404 if (error) 405 goto out; 406 } 407 np->n_attrstamp = 0; 408 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 409 error = VOP_GETATTR(vp, &vattr, cred); 410 if (error) 411 goto out; 412 mtx_lock(&np->n_mtx); 413 np->n_mtime = vattr.va_mtime; 414 mtx_unlock(&np->n_mtx); 415 } else { 416 mtx_unlock(&np->n_mtx); 417 error = VOP_GETATTR(vp, &vattr, cred); 418 if (error) 419 return (error); 420 mtx_lock(&np->n_mtx); 421 if ((np->n_flag & NSIZECHANGED) 422 || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) { 423 mtx_unlock(&np->n_mtx); 424 if (vp->v_type == VDIR) 425 (nmp->nm_rpcops->nr_invaldir)(vp); 426 error = nfs_vinvalbuf(vp, V_SAVE, td, 1); 427 if (error) 428 goto out; 429 mtx_lock(&np->n_mtx); 430 np->n_mtime = vattr.va_mtime; 431 np->n_flag &= ~NSIZECHANGED; 432 } 433 mtx_unlock(&np->n_mtx); 434 } 435out: 436 nfs_downgrade_vnlock(vp, old_lock); 437 return error; 438} 439 440/* 441 * Vnode op for read using bio 442 */ 443int 444nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) 445{ 446 struct nfsnode *np = VTONFS(vp); 447 int biosize, i; 448 struct buf *bp, *rabp; 449 struct thread *td; 450 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 451 daddr_t lbn, rabn; 452 int bcount; 453 int seqcount; 454 int nra, error = 0, n = 0, on = 0; 455 456#ifdef DIAGNOSTIC 457 if (uio->uio_rw != UIO_READ) 458 panic("nfs_read mode"); 459#endif 460 if (uio->uio_resid == 0) 461 return (0); 462 if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */ 463 return (EINVAL); 464 td = uio->uio_td; 465 466 mtx_lock(&nmp->nm_mtx); 467 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 468 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 469 mtx_unlock(&nmp->nm_mtx); 470 (void)nfs_fsinfo(nmp, vp, cred, td); 471 } else 472 mtx_unlock(&nmp->nm_mtx); 473 474 if (vp->v_type != VDIR && 475 (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) 476 return (EFBIG); 477 478 if (nfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG)) 479 /* No caching/ no readaheads. Just read data into the user buffer */ 480 return nfs_readrpc(vp, uio, cred); 481 482 biosize = vp->v_mount->mnt_stat.f_iosize; 483 seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); 484 485 error = nfs_bioread_check_cons(vp, td, cred); 486 if (error) 487 return error; 488 489 do { 490 u_quad_t nsize; 491 492 mtx_lock(&np->n_mtx); 493 nsize = np->n_size; 494 mtx_unlock(&np->n_mtx); 495 496 switch (vp->v_type) { 497 case VREG: 498 nfsstats.biocache_reads++; 499 lbn = uio->uio_offset / biosize; 500 on = uio->uio_offset & (biosize - 1); 501 502 /* 503 * Start the read ahead(s), as required. 504 */ 505 if (nmp->nm_readahead > 0) { 506 for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && 507 (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) { 508 rabn = lbn + 1 + nra; 509 if (incore(&vp->v_bufobj, rabn) == NULL) { 510 rabp = nfs_getcacheblk(vp, rabn, biosize, td); 511 if (!rabp) { 512 error = nfs_sigintr(nmp, td); 513 return (error ? 
error : EINTR); 514 } 515 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { 516 rabp->b_flags |= B_ASYNC; 517 rabp->b_iocmd = BIO_READ; 518 vfs_busy_pages(rabp, 0); 519 if (nfs_asyncio(nmp, rabp, cred, td)) { 520 rabp->b_flags |= B_INVAL; 521 rabp->b_ioflags |= BIO_ERROR; 522 vfs_unbusy_pages(rabp); 523 brelse(rabp); 524 break; 525 } 526 } else { 527 brelse(rabp); 528 } 529 } 530 } 531 } 532 533 /* Note that bcount is *not* DEV_BSIZE aligned. */ 534 bcount = biosize; 535 if ((off_t)lbn * biosize >= nsize) { 536 bcount = 0; 537 } else if ((off_t)(lbn + 1) * biosize > nsize) { 538 bcount = nsize - (off_t)lbn * biosize; 539 } 540 bp = nfs_getcacheblk(vp, lbn, bcount, td); 541 542 if (!bp) { 543 error = nfs_sigintr(nmp, td); 544 return (error ? error : EINTR); 545 } 546 547 /* 548 * If B_CACHE is not set, we must issue the read. If this 549 * fails, we return an error. 550 */ 551 552 if ((bp->b_flags & B_CACHE) == 0) { 553 bp->b_iocmd = BIO_READ; 554 vfs_busy_pages(bp, 0); 555 error = nfs_doio(vp, bp, cred, td); 556 if (error) { 557 brelse(bp); 558 return (error); 559 } 560 } 561 562 /* 563 * on is the offset into the current bp. Figure out how many 564 * bytes we can copy out of the bp. Note that bcount is 565 * NOT DEV_BSIZE aligned. 566 * 567 * Then figure out how many bytes we can copy into the uio. 568 */ 569 570 n = 0; 571 if (on < bcount) 572 n = min((unsigned)(bcount - on), uio->uio_resid); 573 break; 574 case VLNK: 575 nfsstats.biocache_readlinks++; 576 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td); 577 if (!bp) { 578 error = nfs_sigintr(nmp, td); 579 return (error ? error : EINTR); 580 } 581 if ((bp->b_flags & B_CACHE) == 0) { 582 bp->b_iocmd = BIO_READ; 583 vfs_busy_pages(bp, 0); 584 error = nfs_doio(vp, bp, cred, td); 585 if (error) { 586 bp->b_ioflags |= BIO_ERROR; 587 brelse(bp); 588 return (error); 589 } 590 } 591 n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid); 592 on = 0; 593 break; 594 case VDIR: 595 nfsstats.biocache_readdirs++; 596 if (np->n_direofoffset 597 && uio->uio_offset >= np->n_direofoffset) { 598 return (0); 599 } 600 lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ; 601 on = uio->uio_offset & (NFS_DIRBLKSIZ - 1); 602 bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td); 603 if (!bp) { 604 error = nfs_sigintr(nmp, td); 605 return (error ? error : EINTR); 606 } 607 if ((bp->b_flags & B_CACHE) == 0) { 608 bp->b_iocmd = BIO_READ; 609 vfs_busy_pages(bp, 0); 610 error = nfs_doio(vp, bp, cred, td); 611 if (error) { 612 brelse(bp); 613 } 614 while (error == NFSERR_BAD_COOKIE) { 615 (nmp->nm_rpcops->nr_invaldir)(vp); 616 error = nfs_vinvalbuf(vp, 0, td, 1); 617 /* 618 * Yuck! The directory has been modified on the 619 * server. The only way to get the block is by 620 * reading from the beginning to get all the 621 * offset cookies. 622 * 623 * Leave the last bp intact unless there is an error. 624 * Loop back up to the while if the error is another 625 * NFSERR_BAD_COOKIE (double yuch!). 626 */ 627 for (i = 0; i <= lbn && !error; i++) { 628 if (np->n_direofoffset 629 && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) 630 return (0); 631 bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td); 632 if (!bp) { 633 error = nfs_sigintr(nmp, td); 634 return (error ? error : EINTR); 635 } 636 if ((bp->b_flags & B_CACHE) == 0) { 637 bp->b_iocmd = BIO_READ; 638 vfs_busy_pages(bp, 0); 639 error = nfs_doio(vp, bp, cred, td); 640 /* 641 * no error + B_INVAL == directory EOF, 642 * use the block. 
643 */ 644 if (error == 0 && (bp->b_flags & B_INVAL)) 645 break; 646 } 647 /* 648 * An error will throw away the block and the 649 * for loop will break out. If no error and this 650 * is not the block we want, we throw away the 651 * block and go for the next one via the for loop. 652 */ 653 if (error || i < lbn) 654 brelse(bp); 655 } 656 } 657 /* 658 * The above while is repeated if we hit another cookie 659 * error. If we hit an error and it wasn't a cookie error, 660 * we give up. 661 */ 662 if (error) 663 return (error); 664 } 665 666 /* 667 * If not eof and read aheads are enabled, start one. 668 * (You need the current block first, so that you have the 669 * directory offset cookie of the next block.) 670 */ 671 if (nmp->nm_readahead > 0 && 672 (bp->b_flags & B_INVAL) == 0 && 673 (np->n_direofoffset == 0 || 674 (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && 675 incore(&vp->v_bufobj, lbn + 1) == NULL) { 676 rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td); 677 if (rabp) { 678 if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { 679 rabp->b_flags |= B_ASYNC; 680 rabp->b_iocmd = BIO_READ; 681 vfs_busy_pages(rabp, 0); 682 if (nfs_asyncio(nmp, rabp, cred, td)) { 683 rabp->b_flags |= B_INVAL; 684 rabp->b_ioflags |= BIO_ERROR; 685 vfs_unbusy_pages(rabp); 686 brelse(rabp); 687 } 688 } else { 689 brelse(rabp); 690 } 691 } 692 } 693 /* 694 * Unlike VREG files, whos buffer size ( bp->b_bcount ) is 695 * chopped for the EOF condition, we cannot tell how large 696 * NFS directories are going to be until we hit EOF. So 697 * an NFS directory buffer is *not* chopped to its EOF. Now, 698 * it just so happens that b_resid will effectively chop it 699 * to EOF. *BUT* this information is lost if the buffer goes 700 * away and is reconstituted into a B_CACHE state ( due to 701 * being VMIO ) later. So we keep track of the directory eof 702 * in np->n_direofoffset and chop it off as an extra step 703 * right here. 704 */ 705 n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on); 706 if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset) 707 n = np->n_direofoffset - uio->uio_offset; 708 break; 709 default: 710 nfs_printf(" nfs_bioread: type %x unexpected\n", vp->v_type); 711 bp = NULL; 712 break; 713 }; 714 715 if (n > 0) { 716 error = uiomove(bp->b_data + on, (int)n, uio); 717 } 718 if (vp->v_type == VLNK) 719 n = 0; 720 if (bp != NULL) 721 brelse(bp); 722 } while (error == 0 && uio->uio_resid > 0 && n > 0); 723 return (error); 724} 725 726/* 727 * The NFS write path cannot handle iovecs with len > 1. So we need to 728 * break up iovecs accordingly (restricting them to wsize). 729 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf). 730 * For the ASYNC case, 2 copies are needed. The first a copy from the 731 * user buffer to a staging buffer and then a second copy from the staging 732 * buffer to mbufs. This can be optimized by copying from the user buffer 733 * directly into mbufs and passing the chain down, but that requires a 734 * fair amount of re-working of the relevant codepaths (and can be done 735 * later). 
736 */ 737static int 738nfs_directio_write(vp, uiop, cred, ioflag) 739 struct vnode *vp; 740 struct uio *uiop; 741 struct ucred *cred; 742 int ioflag; 743{ 744 int error; 745 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 746 struct thread *td = uiop->uio_td; 747 int size; 748 int wsize; 749 750 mtx_lock(&nmp->nm_mtx); 751 wsize = nmp->nm_wsize; 752 mtx_unlock(&nmp->nm_mtx); 753 if (ioflag & IO_SYNC) { 754 int iomode, must_commit; 755 struct uio uio; 756 struct iovec iov; 757do_sync: 758 while (uiop->uio_resid > 0) { 759 size = min(uiop->uio_resid, wsize); 760 size = min(uiop->uio_iov->iov_len, size); 761 iov.iov_base = uiop->uio_iov->iov_base; 762 iov.iov_len = size; 763 uio.uio_iov = &iov; 764 uio.uio_iovcnt = 1; 765 uio.uio_offset = uiop->uio_offset; 766 uio.uio_resid = size; 767 uio.uio_segflg = UIO_USERSPACE; 768 uio.uio_rw = UIO_WRITE; 769 uio.uio_td = td; 770 iomode = NFSV3WRITE_FILESYNC; 771 error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, 772 &iomode, &must_commit); 773 KASSERT((must_commit == 0), 774 ("nfs_directio_write: Did not commit write")); 775 if (error) 776 return (error); 777 uiop->uio_offset += size; 778 uiop->uio_resid -= size; 779 if (uiop->uio_iov->iov_len <= size) { 780 uiop->uio_iovcnt--; 781 uiop->uio_iov++; 782 } else { 783 uiop->uio_iov->iov_base = 784 (char *)uiop->uio_iov->iov_base + size; 785 uiop->uio_iov->iov_len -= size; 786 } 787 } 788 } else { 789 struct uio *t_uio; 790 struct iovec *t_iov; 791 struct buf *bp; 792 793 /* 794 * Break up the write into blocksize chunks and hand these 795 * over to nfsiod's for write back. 796 * Unfortunately, this incurs a copy of the data. Since 797 * the user could modify the buffer before the write is 798 * initiated. 799 * 800 * The obvious optimization here is that one of the 2 copies 801 * in the async write path can be eliminated by copying the 802 * data here directly into mbufs and passing the mbuf chain 803 * down. But that will require a fair amount of re-working 804 * of the code and can be done if there's enough interest 805 * in NFS directio access. 
806 */ 807 while (uiop->uio_resid > 0) { 808 size = min(uiop->uio_resid, wsize); 809 size = min(uiop->uio_iov->iov_len, size); 810 bp = getpbuf(&nfs_pbuf_freecnt); 811 t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK); 812 t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK); 813 t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK); 814 t_iov->iov_len = size; 815 t_uio->uio_iov = t_iov; 816 t_uio->uio_iovcnt = 1; 817 t_uio->uio_offset = uiop->uio_offset; 818 t_uio->uio_resid = size; 819 t_uio->uio_segflg = UIO_SYSSPACE; 820 t_uio->uio_rw = UIO_WRITE; 821 t_uio->uio_td = td; 822 bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size); 823 bp->b_flags |= B_DIRECT; 824 bp->b_iocmd = BIO_WRITE; 825 if (cred != NOCRED) { 826 crhold(cred); 827 bp->b_wcred = cred; 828 } else 829 bp->b_wcred = NOCRED; 830 bp->b_caller1 = (void *)t_uio; 831 bp->b_vp = vp; 832 error = nfs_asyncio(nmp, bp, NOCRED, td); 833 if (error) { 834 free(t_iov->iov_base, M_NFSDIRECTIO); 835 free(t_iov, M_NFSDIRECTIO); 836 free(t_uio, M_NFSDIRECTIO); 837 bp->b_vp = NULL; 838 relpbuf(bp, &nfs_pbuf_freecnt); 839 if (error == EINTR) 840 return (error); 841 goto do_sync; 842 } 843 uiop->uio_offset += size; 844 uiop->uio_resid -= size; 845 if (uiop->uio_iov->iov_len <= size) { 846 uiop->uio_iovcnt--; 847 uiop->uio_iov++; 848 } else { 849 uiop->uio_iov->iov_base = 850 (char *)uiop->uio_iov->iov_base + size; 851 uiop->uio_iov->iov_len -= size; 852 } 853 } 854 } 855 return (0); 856} 857 858/* 859 * Vnode op for write using bio 860 */ 861int 862nfs_write(struct vop_write_args *ap) 863{ 864 int biosize; 865 struct uio *uio = ap->a_uio; 866 struct thread *td = uio->uio_td; 867 struct vnode *vp = ap->a_vp; 868 struct nfsnode *np = VTONFS(vp); 869 struct ucred *cred = ap->a_cred; 870 int ioflag = ap->a_ioflag; 871 struct buf *bp; 872 struct vattr vattr; 873 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 874 daddr_t lbn; 875 int bcount; 876 int n, on, error = 0; 877 878#ifdef DIAGNOSTIC 879 if (uio->uio_rw != UIO_WRITE) 880 panic("nfs_write mode"); 881 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread) 882 panic("nfs_write proc"); 883#endif 884 if (vp->v_type != VREG) 885 return (EIO); 886 mtx_lock(&np->n_mtx); 887 if (np->n_flag & NWRITEERR) { 888 np->n_flag &= ~NWRITEERR; 889 mtx_unlock(&np->n_mtx); 890 return (np->n_error); 891 } else 892 mtx_unlock(&np->n_mtx); 893 mtx_lock(&nmp->nm_mtx); 894 if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && 895 (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { 896 mtx_unlock(&nmp->nm_mtx); 897 (void)nfs_fsinfo(nmp, vp, cred, td); 898 } else 899 mtx_unlock(&nmp->nm_mtx); 900 901 /* 902 * Synchronously flush pending buffers if we are in synchronous 903 * mode or if we are appending. 904 */ 905 if (ioflag & (IO_APPEND | IO_SYNC)) { 906 mtx_lock(&np->n_mtx); 907 if (np->n_flag & NMODIFIED) { 908 mtx_unlock(&np->n_mtx); 909#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */ 910 /* 911 * Require non-blocking, synchronous writes to 912 * dirty files to inform the program it needs 913 * to fsync(2) explicitly. 914 */ 915 if (ioflag & IO_NDELAY) 916 return (EAGAIN); 917#endif 918flush_and_restart: 919 np->n_attrstamp = 0; 920 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 921 error = nfs_vinvalbuf(vp, V_SAVE, td, 1); 922 if (error) 923 return (error); 924 } else 925 mtx_unlock(&np->n_mtx); 926 } 927 928 /* 929 * If IO_APPEND then load uio_offset. We restart here if we cannot 930 * get the append lock. 
931 */ 932 if (ioflag & IO_APPEND) { 933 np->n_attrstamp = 0; 934 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 935 error = VOP_GETATTR(vp, &vattr, cred); 936 if (error) 937 return (error); 938 mtx_lock(&np->n_mtx); 939 uio->uio_offset = np->n_size; 940 mtx_unlock(&np->n_mtx); 941 } 942 943 if (uio->uio_offset < 0) 944 return (EINVAL); 945 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) 946 return (EFBIG); 947 if (uio->uio_resid == 0) 948 return (0); 949 950 if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG) 951 return nfs_directio_write(vp, uio, cred, ioflag); 952 953 /* 954 * Maybe this should be above the vnode op call, but so long as 955 * file servers have no limits, i don't think it matters 956 */ 957 if (vn_rlimit_fsize(vp, uio, td)) 958 return (EFBIG); 959 960 biosize = vp->v_mount->mnt_stat.f_iosize; 961 /* 962 * Find all of this file's B_NEEDCOMMIT buffers. If our writes 963 * would exceed the local maximum per-file write commit size when 964 * combined with those, we must decide whether to flush, 965 * go synchronous, or return error. We don't bother checking 966 * IO_UNIT -- we just make all writes atomic anyway, as there's 967 * no point optimizing for something that really won't ever happen. 968 */ 969 if (!(ioflag & IO_SYNC)) { 970 int nflag; 971 972 mtx_lock(&np->n_mtx); 973 nflag = np->n_flag; 974 mtx_unlock(&np->n_mtx); 975 int needrestart = 0; 976 if (nmp->nm_wcommitsize < uio->uio_resid) { 977 /* 978 * If this request could not possibly be completed 979 * without exceeding the maximum outstanding write 980 * commit size, see if we can convert it into a 981 * synchronous write operation. 982 */ 983 if (ioflag & IO_NDELAY) 984 return (EAGAIN); 985 ioflag |= IO_SYNC; 986 if (nflag & NMODIFIED) 987 needrestart = 1; 988 } else if (nflag & NMODIFIED) { 989 int wouldcommit = 0; 990 BO_LOCK(&vp->v_bufobj); 991 if (vp->v_bufobj.bo_dirty.bv_cnt != 0) { 992 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, 993 b_bobufs) { 994 if (bp->b_flags & B_NEEDCOMMIT) 995 wouldcommit += bp->b_bcount; 996 } 997 } 998 BO_UNLOCK(&vp->v_bufobj); 999 /* 1000 * Since we're not operating synchronously and 1001 * bypassing the buffer cache, we are in a commit 1002 * and holding all of these buffers whether 1003 * transmitted or not. If not limited, this 1004 * will lead to the buffer cache deadlocking, 1005 * as no one else can flush our uncommitted buffers. 1006 */ 1007 wouldcommit += uio->uio_resid; 1008 /* 1009 * If we would initially exceed the maximum 1010 * outstanding write commit size, flush and restart. 1011 */ 1012 if (wouldcommit > nmp->nm_wcommitsize) 1013 needrestart = 1; 1014 } 1015 if (needrestart) 1016 goto flush_and_restart; 1017 } 1018 1019 do { 1020 nfsstats.biocache_writes++; 1021 lbn = uio->uio_offset / biosize; 1022 on = uio->uio_offset & (biosize-1); 1023 n = min((unsigned)(biosize - on), uio->uio_resid); 1024again: 1025 /* 1026 * Handle direct append and file extension cases, calculate 1027 * unaligned buffer size. 1028 */ 1029 mtx_lock(&np->n_mtx); 1030 if (uio->uio_offset == np->n_size && n) { 1031 mtx_unlock(&np->n_mtx); 1032 /* 1033 * Get the buffer (in its pre-append state to maintain 1034 * B_CACHE if it was previously set). Resize the 1035 * nfsnode after we have locked the buffer to prevent 1036 * readers from reading garbage. 
1037 */ 1038 bcount = on; 1039 bp = nfs_getcacheblk(vp, lbn, bcount, td); 1040 1041 if (bp != NULL) { 1042 long save; 1043 1044 mtx_lock(&np->n_mtx); 1045 np->n_size = uio->uio_offset + n; 1046 np->n_flag |= NMODIFIED; 1047 vnode_pager_setsize(vp, np->n_size); 1048 mtx_unlock(&np->n_mtx); 1049 1050 save = bp->b_flags & B_CACHE; 1051 bcount += n; 1052 allocbuf(bp, bcount); 1053 bp->b_flags |= save; 1054 } 1055 } else { 1056 /* 1057 * Obtain the locked cache block first, and then 1058 * adjust the file's size as appropriate. 1059 */ 1060 bcount = on + n; 1061 if ((off_t)lbn * biosize + bcount < np->n_size) { 1062 if ((off_t)(lbn + 1) * biosize < np->n_size) 1063 bcount = biosize; 1064 else 1065 bcount = np->n_size - (off_t)lbn * biosize; 1066 } 1067 mtx_unlock(&np->n_mtx); 1068 bp = nfs_getcacheblk(vp, lbn, bcount, td); 1069 mtx_lock(&np->n_mtx); 1070 if (uio->uio_offset + n > np->n_size) { 1071 np->n_size = uio->uio_offset + n; 1072 np->n_flag |= NMODIFIED; 1073 vnode_pager_setsize(vp, np->n_size); 1074 } 1075 mtx_unlock(&np->n_mtx); 1076 } 1077 1078 if (!bp) { 1079 error = nfs_sigintr(nmp, td); 1080 if (!error) 1081 error = EINTR; 1082 break; 1083 } 1084 1085 /* 1086 * Issue a READ if B_CACHE is not set. In special-append 1087 * mode, B_CACHE is based on the buffer prior to the write 1088 * op and is typically set, avoiding the read. If a read 1089 * is required in special append mode, the server will 1090 * probably send us a short-read since we extended the file 1091 * on our end, resulting in b_resid == 0 and, thusly, 1092 * B_CACHE getting set. 1093 * 1094 * We can also avoid issuing the read if the write covers 1095 * the entire buffer. We have to make sure the buffer state 1096 * is reasonable in this case since we will not be initiating 1097 * I/O. See the comments in kern/vfs_bio.c's getblk() for 1098 * more information. 1099 * 1100 * B_CACHE may also be set due to the buffer being cached 1101 * normally. 1102 */ 1103 1104 if (on == 0 && n == bcount) { 1105 bp->b_flags |= B_CACHE; 1106 bp->b_flags &= ~B_INVAL; 1107 bp->b_ioflags &= ~BIO_ERROR; 1108 } 1109 1110 if ((bp->b_flags & B_CACHE) == 0) { 1111 bp->b_iocmd = BIO_READ; 1112 vfs_busy_pages(bp, 0); 1113 error = nfs_doio(vp, bp, cred, td); 1114 if (error) { 1115 brelse(bp); 1116 break; 1117 } 1118 } 1119 if (bp->b_wcred == NOCRED) 1120 bp->b_wcred = crhold(cred); 1121 mtx_lock(&np->n_mtx); 1122 np->n_flag |= NMODIFIED; 1123 mtx_unlock(&np->n_mtx); 1124 1125 /* 1126 * If dirtyend exceeds file size, chop it down. This should 1127 * not normally occur but there is an append race where it 1128 * might occur XXX, so we log it. 1129 * 1130 * If the chopping creates a reverse-indexed or degenerate 1131 * situation with dirtyoff/end, we 0 both of them. 1132 */ 1133 1134 if (bp->b_dirtyend > bcount) { 1135 nfs_printf("NFS append race @%lx:%d\n", 1136 (long)bp->b_blkno * DEV_BSIZE, 1137 bp->b_dirtyend - bcount); 1138 bp->b_dirtyend = bcount; 1139 } 1140 1141 if (bp->b_dirtyoff >= bp->b_dirtyend) 1142 bp->b_dirtyoff = bp->b_dirtyend = 0; 1143 1144 /* 1145 * If the new write will leave a contiguous dirty 1146 * area, just update the b_dirtyoff and b_dirtyend, 1147 * otherwise force a write rpc of the old dirty area. 1148 * 1149 * While it is possible to merge discontiguous writes due to 1150 * our having a B_CACHE buffer ( and thus valid read data 1151 * for the hole), we don't because it could lead to 1152 * significant cache coherency problems with multiple clients, 1153 * especially if locking is implemented later on. 
1154 * 1155 * as an optimization we could theoretically maintain 1156 * a linked list of discontinuous areas, but we would still 1157 * have to commit them separately so there isn't much 1158 * advantage to it except perhaps a bit of asynchronization. 1159 */ 1160 1161 if (bp->b_dirtyend > 0 && 1162 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { 1163 if (bwrite(bp) == EINTR) { 1164 error = EINTR; 1165 break; 1166 } 1167 goto again; 1168 } 1169 1170 error = uiomove((char *)bp->b_data + on, n, uio); 1171 1172 /* 1173 * Since this block is being modified, it must be written 1174 * again and not just committed. Since write clustering does 1175 * not work for the stage 1 data write, only the stage 2 1176 * commit rpc, we have to clear B_CLUSTEROK as well. 1177 */ 1178 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 1179 1180 if (error) { 1181 bp->b_ioflags |= BIO_ERROR; 1182 brelse(bp); 1183 break; 1184 } 1185 1186 /* 1187 * Only update dirtyoff/dirtyend if not a degenerate 1188 * condition. 1189 */ 1190 if (n) { 1191 if (bp->b_dirtyend > 0) { 1192 bp->b_dirtyoff = min(on, bp->b_dirtyoff); 1193 bp->b_dirtyend = max((on + n), bp->b_dirtyend); 1194 } else { 1195 bp->b_dirtyoff = on; 1196 bp->b_dirtyend = on + n; 1197 } 1198 vfs_bio_set_valid(bp, on, n); 1199 } 1200 1201 /* 1202 * If IO_SYNC do bwrite(). 1203 * 1204 * IO_INVAL appears to be unused. The idea appears to be 1205 * to turn off caching in this case. Very odd. XXX 1206 */ 1207 if ((ioflag & IO_SYNC)) { 1208 if (ioflag & IO_INVAL) 1209 bp->b_flags |= B_NOCACHE; 1210 error = bwrite(bp); 1211 if (error) 1212 break; 1213 } else if ((n + on) == biosize) { 1214 bp->b_flags |= B_ASYNC; 1215 (void) (nmp->nm_rpcops->nr_writebp)(bp, 0, NULL); 1216 } else { 1217 bdwrite(bp); 1218 } 1219 } while (uio->uio_resid > 0 && n > 0); 1220 1221 return (error); 1222} 1223 1224/* 1225 * Get an nfs cache block. 1226 * 1227 * Allocate a new one if the block isn't currently in the cache 1228 * and return the block marked busy. If the calling process is 1229 * interrupted by a signal for an interruptible mount point, return 1230 * NULL. 1231 * 1232 * The caller must carefully deal with the possible B_INVAL state of 1233 * the buffer. nfs_doio() clears B_INVAL (and nfs_asyncio() clears it 1234 * indirectly), so synchronous reads can be issued without worrying about 1235 * the B_INVAL state. We have to be a little more careful when dealing 1236 * with writes (see comments in nfs_write()) when extending a file past 1237 * its EOF. 1238 */ 1239static struct buf * 1240nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td) 1241{ 1242 struct buf *bp; 1243 struct mount *mp; 1244 struct nfsmount *nmp; 1245 1246 mp = vp->v_mount; 1247 nmp = VFSTONFS(mp); 1248 1249 if (nmp->nm_flag & NFSMNT_INT) { 1250 sigset_t oldset; 1251 1252 nfs_set_sigmask(td, &oldset); 1253 bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0); 1254 nfs_restore_sigmask(td, &oldset); 1255 while (bp == NULL) { 1256 if (nfs_sigintr(nmp, td)) 1257 return (NULL); 1258 bp = getblk(vp, bn, size, 0, 2 * hz, 0); 1259 } 1260 } else { 1261 bp = getblk(vp, bn, size, 0, 0, 0); 1262 } 1263 1264 if (vp->v_type == VREG) { 1265 int biosize; 1266 1267 biosize = mp->mnt_stat.f_iosize; 1268 bp->b_blkno = bn * (biosize / DEV_BSIZE); 1269 } 1270 return (bp); 1271} 1272 1273/* 1274 * Flush and invalidate all dirty buffers. If another process is already 1275 * doing the flush, just wait for completion. 
1276 */ 1277int 1278nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg) 1279{ 1280 struct nfsnode *np = VTONFS(vp); 1281 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1282 int error = 0, slpflag, slptimeo; 1283 int old_lock = 0; 1284 1285 ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf"); 1286 1287 if ((nmp->nm_flag & NFSMNT_INT) == 0) 1288 intrflg = 0; 1289 if (intrflg) { 1290 slpflag = NFS_PCATCH; 1291 slptimeo = 2 * hz; 1292 } else { 1293 slpflag = 0; 1294 slptimeo = 0; 1295 } 1296 1297 old_lock = nfs_upgrade_vnlock(vp); 1298 if (vp->v_iflag & VI_DOOMED) { 1299 /* 1300 * Since vgonel() uses the generic vinvalbuf() to flush 1301 * dirty buffers and it does not call this function, it 1302 * is safe to just return OK when VI_DOOMED is set. 1303 */ 1304 nfs_downgrade_vnlock(vp, old_lock); 1305 return (0); 1306 } 1307 1308 /* 1309 * Now, flush as required. 1310 */ 1311 if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) { 1312 VM_OBJECT_LOCK(vp->v_bufobj.bo_object); 1313 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC); 1314 VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object); 1315 /* 1316 * If the page clean was interrupted, fail the invalidation. 1317 * Not doing so, we run the risk of losing dirty pages in the 1318 * vinvalbuf() call below. 1319 */ 1320 if (intrflg && (error = nfs_sigintr(nmp, td))) 1321 goto out; 1322 } 1323 1324 error = vinvalbuf(vp, flags, slpflag, 0); 1325 while (error) { 1326 if (intrflg && (error = nfs_sigintr(nmp, td))) 1327 goto out; 1328 error = vinvalbuf(vp, flags, 0, slptimeo); 1329 } 1330 mtx_lock(&np->n_mtx); 1331 if (np->n_directio_asyncwr == 0) 1332 np->n_flag &= ~NMODIFIED; 1333 mtx_unlock(&np->n_mtx); 1334out: 1335 nfs_downgrade_vnlock(vp, old_lock); 1336 return error; 1337} 1338 1339/* 1340 * Initiate asynchronous I/O. Return an error if no nfsiods are available. 1341 * This is mainly to avoid queueing async I/O requests when the nfsiods 1342 * are all hung on a dead server. 1343 * 1344 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp 1345 * is eventually dequeued by the async daemon, nfs_doio() *will*. 1346 */ 1347int 1348nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td) 1349{ 1350 int iod; 1351 int gotiod; 1352 int slpflag = 0; 1353 int slptimeo = 0; 1354 int error, error2; 1355 1356 /* 1357 * Commits are usually short and sweet so lets save some cpu and 1358 * leave the async daemons for more important rpc's (such as reads 1359 * and writes). 1360 */ 1361 mtx_lock(&nfs_iod_mtx); 1362 if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) && 1363 (nmp->nm_bufqiods > nfs_numasync / 2)) { 1364 mtx_unlock(&nfs_iod_mtx); 1365 return(EIO); 1366 } 1367again: 1368 if (nmp->nm_flag & NFSMNT_INT) 1369 slpflag = NFS_PCATCH; 1370 gotiod = FALSE; 1371 1372 /* 1373 * Find a free iod to process this request. 1374 */ 1375 for (iod = 0; iod < nfs_numasync; iod++) 1376 if (nfs_iodwant[iod] == NFSIOD_AVAILABLE) { 1377 gotiod = TRUE; 1378 break; 1379 } 1380 1381 /* 1382 * Try to create one if none are free. 1383 */ 1384 if (!gotiod) { 1385 iod = nfs_nfsiodnew(1); 1386 if (iod != -1) 1387 gotiod = TRUE; 1388 } 1389 1390 if (gotiod) { 1391 /* 1392 * Found one, so wake it up and tell it which 1393 * mount to process. 
1394 */ 1395 NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n", 1396 iod, nmp)); 1397 nfs_iodwant[iod] = NFSIOD_NOT_AVAILABLE; 1398 nfs_iodmount[iod] = nmp; 1399 nmp->nm_bufqiods++; 1400 wakeup(&nfs_iodwant[iod]); 1401 } 1402 1403 /* 1404 * If none are free, we may already have an iod working on this mount 1405 * point. If so, it will process our request. 1406 */ 1407 if (!gotiod) { 1408 if (nmp->nm_bufqiods > 0) { 1409 NFS_DPF(ASYNCIO, 1410 ("nfs_asyncio: %d iods are already processing mount %p\n", 1411 nmp->nm_bufqiods, nmp)); 1412 gotiod = TRUE; 1413 } 1414 } 1415 1416 /* 1417 * If we have an iod which can process the request, then queue 1418 * the buffer. 1419 */ 1420 if (gotiod) { 1421 /* 1422 * Ensure that the queue never grows too large. We still want 1423 * to asynchronize so we block rather then return EIO. 1424 */ 1425 while (nmp->nm_bufqlen >= 2*nfs_numasync) { 1426 NFS_DPF(ASYNCIO, 1427 ("nfs_asyncio: waiting for mount %p queue to drain\n", nmp)); 1428 nmp->nm_bufqwant = TRUE; 1429 error = nfs_msleep(td, &nmp->nm_bufq, &nfs_iod_mtx, 1430 slpflag | PRIBIO, 1431 "nfsaio", slptimeo); 1432 if (error) { 1433 error2 = nfs_sigintr(nmp, td); 1434 if (error2) { 1435 mtx_unlock(&nfs_iod_mtx); 1436 return (error2); 1437 } 1438 if (slpflag == NFS_PCATCH) { 1439 slpflag = 0; 1440 slptimeo = 2 * hz; 1441 } 1442 } 1443 /* 1444 * We might have lost our iod while sleeping, 1445 * so check and loop if nescessary. 1446 */ 1447 if (nmp->nm_bufqiods == 0) { 1448 NFS_DPF(ASYNCIO, 1449 ("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp)); 1450 goto again; 1451 } 1452 } 1453 1454 /* We might have lost our nfsiod */ 1455 if (nmp->nm_bufqiods == 0) { 1456 NFS_DPF(ASYNCIO, 1457 ("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp)); 1458 goto again; 1459 } 1460 1461 if (bp->b_iocmd == BIO_READ) { 1462 if (bp->b_rcred == NOCRED && cred != NOCRED) 1463 bp->b_rcred = crhold(cred); 1464 } else { 1465 if (bp->b_wcred == NOCRED && cred != NOCRED) 1466 bp->b_wcred = crhold(cred); 1467 } 1468 1469 if (bp->b_flags & B_REMFREE) 1470 bremfreef(bp); 1471 BUF_KERNPROC(bp); 1472 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist); 1473 nmp->nm_bufqlen++; 1474 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) { 1475 mtx_lock(&(VTONFS(bp->b_vp))->n_mtx); 1476 VTONFS(bp->b_vp)->n_flag |= NMODIFIED; 1477 VTONFS(bp->b_vp)->n_directio_asyncwr++; 1478 mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx); 1479 } 1480 mtx_unlock(&nfs_iod_mtx); 1481 return (0); 1482 } 1483 1484 mtx_unlock(&nfs_iod_mtx); 1485 1486 /* 1487 * All the iods are busy on other mounts, so return EIO to 1488 * force the caller to process the i/o synchronously. 
1489 */ 1490 NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n")); 1491 return (EIO); 1492} 1493 1494void 1495nfs_doio_directwrite(struct buf *bp) 1496{ 1497 int iomode, must_commit; 1498 struct uio *uiop = (struct uio *)bp->b_caller1; 1499 char *iov_base = uiop->uio_iov->iov_base; 1500 struct nfsmount *nmp = VFSTONFS(bp->b_vp->v_mount); 1501 1502 iomode = NFSV3WRITE_FILESYNC; 1503 uiop->uio_td = NULL; /* NULL since we're in nfsiod */ 1504 (nmp->nm_rpcops->nr_writerpc)(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit); 1505 KASSERT((must_commit == 0), ("nfs_doio_directwrite: Did not commit write")); 1506 free(iov_base, M_NFSDIRECTIO); 1507 free(uiop->uio_iov, M_NFSDIRECTIO); 1508 free(uiop, M_NFSDIRECTIO); 1509 if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) { 1510 struct nfsnode *np = VTONFS(bp->b_vp); 1511 mtx_lock(&np->n_mtx); 1512 np->n_directio_asyncwr--; 1513 if (np->n_directio_asyncwr == 0) { 1514 VTONFS(bp->b_vp)->n_flag &= ~NMODIFIED; 1515 if ((np->n_flag & NFSYNCWAIT)) { 1516 np->n_flag &= ~NFSYNCWAIT; 1517 wakeup((caddr_t)&np->n_directio_asyncwr); 1518 } 1519 } 1520 mtx_unlock(&np->n_mtx); 1521 } 1522 bp->b_vp = NULL; 1523 relpbuf(bp, &nfs_pbuf_freecnt); 1524} 1525 1526/* 1527 * Do an I/O operation to/from a cache block. This may be called 1528 * synchronously or from an nfsiod. 1529 */ 1530int 1531nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td) 1532{ 1533 struct uio *uiop; 1534 struct nfsnode *np; 1535 struct nfsmount *nmp; 1536 int error = 0, iomode, must_commit = 0; 1537 struct uio uio; 1538 struct iovec io; 1539 struct proc *p = td ? td->td_proc : NULL; 1540 uint8_t iocmd; 1541 1542 np = VTONFS(vp); 1543 nmp = VFSTONFS(vp->v_mount); 1544 uiop = &uio; 1545 uiop->uio_iov = &io; 1546 uiop->uio_iovcnt = 1; 1547 uiop->uio_segflg = UIO_SYSSPACE; 1548 uiop->uio_td = td; 1549 1550 /* 1551 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We 1552 * do this here so we do not have to do it in all the code that 1553 * calls us. 1554 */ 1555 bp->b_flags &= ~B_INVAL; 1556 bp->b_ioflags &= ~BIO_ERROR; 1557 1558 KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp)); 1559 iocmd = bp->b_iocmd; 1560 if (iocmd == BIO_READ) { 1561 io.iov_len = uiop->uio_resid = bp->b_bcount; 1562 io.iov_base = bp->b_data; 1563 uiop->uio_rw = UIO_READ; 1564 1565 switch (vp->v_type) { 1566 case VREG: 1567 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE; 1568 nfsstats.read_bios++; 1569 error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr); 1570 1571 if (!error) { 1572 if (uiop->uio_resid) { 1573 /* 1574 * If we had a short read with no error, we must have 1575 * hit a file hole. We should zero-fill the remainder. 1576 * This can also occur if the server hits the file EOF. 1577 * 1578 * Holes used to be able to occur due to pending 1579 * writes, but that is not possible any longer. 
1580 */ 1581 int nread = bp->b_bcount - uiop->uio_resid; 1582 int left = uiop->uio_resid; 1583 1584 if (left > 0) 1585 bzero((char *)bp->b_data + nread, left); 1586 uiop->uio_resid = 0; 1587 } 1588 } 1589 /* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */ 1590 if (p && (vp->v_vflag & VV_TEXT)) { 1591 mtx_lock(&np->n_mtx); 1592 if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.va_mtime)) { 1593 mtx_unlock(&np->n_mtx); 1594 PROC_LOCK(p); 1595 killproc(p, "text file modification"); 1596 PROC_UNLOCK(p); 1597 } else 1598 mtx_unlock(&np->n_mtx); 1599 } 1600 break; 1601 case VLNK: 1602 uiop->uio_offset = (off_t)0; 1603 nfsstats.readlink_bios++; 1604 error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr); 1605 break; 1606 case VDIR: 1607 nfsstats.readdir_bios++; 1608 uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ; 1609 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) { 1610 error = nfs_readdirplusrpc(vp, uiop, cr); 1611 if (error == NFSERR_NOTSUPP) 1612 nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 1613 } 1614 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 1615 error = nfs_readdirrpc(vp, uiop, cr); 1616 /* 1617 * end-of-directory sets B_INVAL but does not generate an 1618 * error. 1619 */ 1620 if (error == 0 && uiop->uio_resid == bp->b_bcount) 1621 bp->b_flags |= B_INVAL; 1622 break; 1623 default: 1624 nfs_printf("nfs_doio: type %x unexpected\n", vp->v_type); 1625 break; 1626 }; 1627 if (error) { 1628 bp->b_ioflags |= BIO_ERROR; 1629 bp->b_error = error; 1630 } 1631 } else { 1632 /* 1633 * If we only need to commit, try to commit 1634 */ 1635 if (bp->b_flags & B_NEEDCOMMIT) { 1636 int retv; 1637 off_t off; 1638 1639 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; 1640 retv = (nmp->nm_rpcops->nr_commit)( 1641 vp, off, bp->b_dirtyend-bp->b_dirtyoff, 1642 bp->b_wcred, td); 1643 if (retv == 0) { 1644 bp->b_dirtyoff = bp->b_dirtyend = 0; 1645 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 1646 bp->b_resid = 0; 1647 bufdone(bp); 1648 return (0); 1649 } 1650 if (retv == NFSERR_STALEWRITEVERF) { 1651 nfs_clearcommit(vp->v_mount); 1652 } 1653 } 1654 1655 /* 1656 * Setup for actual write 1657 */ 1658 mtx_lock(&np->n_mtx); 1659 if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size) 1660 bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE; 1661 mtx_unlock(&np->n_mtx); 1662 1663 if (bp->b_dirtyend > bp->b_dirtyoff) { 1664 io.iov_len = uiop->uio_resid = bp->b_dirtyend 1665 - bp->b_dirtyoff; 1666 uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE 1667 + bp->b_dirtyoff; 1668 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; 1669 uiop->uio_rw = UIO_WRITE; 1670 nfsstats.write_bios++; 1671 1672 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC) 1673 iomode = NFSV3WRITE_UNSTABLE; 1674 else 1675 iomode = NFSV3WRITE_FILESYNC; 1676 1677 error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit); 1678 1679 /* 1680 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try 1681 * to cluster the buffers needing commit. This will allow 1682 * the system to submit a single commit rpc for the whole 1683 * cluster. We can do this even if the buffer is not 100% 1684 * dirty (relative to the NFS blocksize), so we optimize the 1685 * append-to-file-case. 1686 * 1687 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be 1688 * cleared because write clustering only works for commit 1689 * rpc's, not for the data portion of the write). 
1690 */ 1691 1692 if (!error && iomode == NFSV3WRITE_UNSTABLE) { 1693 bp->b_flags |= B_NEEDCOMMIT; 1694 if (bp->b_dirtyoff == 0 1695 && bp->b_dirtyend == bp->b_bcount) 1696 bp->b_flags |= B_CLUSTEROK; 1697 } else { 1698 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 1699 } 1700 1701 /* 1702 * For an interrupted write, the buffer is still valid 1703 * and the write hasn't been pushed to the server yet, 1704 * so we can't set BIO_ERROR and report the interruption 1705 * by setting B_EINTR. For the B_ASYNC case, B_EINTR 1706 * is not relevant, so the rpc attempt is essentially 1707 * a noop. For the case of a V3 write rpc not being 1708 * committed to stable storage, the block is still 1709 * dirty and requires either a commit rpc or another 1710 * write rpc with iomode == NFSV3WRITE_FILESYNC before 1711 * the block is reused. This is indicated by setting 1712 * the B_DELWRI and B_NEEDCOMMIT flags. 1713 * 1714 * If the buffer is marked B_PAGING, it does not reside on 1715 * the vp's paging queues so we cannot call bdirty(). The 1716 * bp in this case is not an NFS cache block so we should 1717 * be safe. XXX 1718 * 1719 * The logic below breaks up errors into recoverable and 1720 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE 1721 * and keep the buffer around for potential write retries. 1722 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL) 1723 * and save the error in the nfsnode. This is less than ideal 1724 * but necessary. Keeping such buffers around could potentially 1725 * cause buffer exhaustion eventually (they can never be written 1726 * out, so will get constantly be re-dirtied). It also causes 1727 * all sorts of vfs panics. For non-recoverable write errors, 1728 * also invalidate the attrcache, so we'll be forced to go over 1729 * the wire for this object, returning an error to user on next 1730 * call (most of the time). 1731 */ 1732 if (error == EINTR || error == EIO || error == ETIMEDOUT 1733 || (!error && (bp->b_flags & B_NEEDCOMMIT))) { 1734 int s; 1735 1736 s = splbio(); 1737 bp->b_flags &= ~(B_INVAL|B_NOCACHE); 1738 if ((bp->b_flags & B_PAGING) == 0) { 1739 bdirty(bp); 1740 bp->b_flags &= ~B_DONE; 1741 } 1742 if (error && (bp->b_flags & B_ASYNC) == 0) 1743 bp->b_flags |= B_EINTR; 1744 splx(s); 1745 } else { 1746 if (error) { 1747 bp->b_ioflags |= BIO_ERROR; 1748 bp->b_flags |= B_INVAL; 1749 bp->b_error = np->n_error = error; 1750 mtx_lock(&np->n_mtx); 1751 np->n_flag |= NWRITEERR; 1752 np->n_attrstamp = 0; 1753 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 1754 mtx_unlock(&np->n_mtx); 1755 } 1756 bp->b_dirtyoff = bp->b_dirtyend = 0; 1757 } 1758 } else { 1759 bp->b_resid = 0; 1760 bufdone(bp); 1761 return (0); 1762 } 1763 } 1764 bp->b_resid = uiop->uio_resid; 1765 if (must_commit) 1766 nfs_clearcommit(vp->v_mount); 1767 bufdone(bp); 1768 return (error); 1769} 1770 1771/* 1772 * Used to aid in handling ftruncate() operations on the NFS client side. 1773 * Truncation creates a number of special problems for NFS. We have to 1774 * throw away VM pages and buffer cache buffers that are beyond EOF, and 1775 * we have to properly handle VM pages or (potentially dirty) buffers 1776 * that straddle the truncation point. 
1777 */ 1778 1779int 1780nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize) 1781{ 1782 struct nfsnode *np = VTONFS(vp); 1783 u_quad_t tsize; 1784 int biosize = vp->v_mount->mnt_stat.f_iosize; 1785 int error = 0; 1786 1787 mtx_lock(&np->n_mtx); 1788 tsize = np->n_size; 1789 np->n_size = nsize; 1790 mtx_unlock(&np->n_mtx); 1791 1792 if (nsize < tsize) { 1793 struct buf *bp; 1794 daddr_t lbn; 1795 int bufsize; 1796 1797 /* 1798 * vtruncbuf() doesn't get the buffer overlapping the 1799 * truncation point. We may have a B_DELWRI and/or B_CACHE 1800 * buffer that now needs to be truncated. 1801 */ 1802 error = vtruncbuf(vp, cred, td, nsize, biosize); 1803 lbn = nsize / biosize; 1804 bufsize = nsize & (biosize - 1); 1805 bp = nfs_getcacheblk(vp, lbn, bufsize, td); 1806 if (!bp) 1807 return EINTR; 1808 if (bp->b_dirtyoff > bp->b_bcount) 1809 bp->b_dirtyoff = bp->b_bcount; 1810 if (bp->b_dirtyend > bp->b_bcount) 1811 bp->b_dirtyend = bp->b_bcount; 1812 bp->b_flags |= B_RELBUF; /* don't leave garbage around */ 1813 brelse(bp); 1814 } else { 1815 vnode_pager_setsize(vp, nsize); 1816 } 1817 return(error); 1818} 1819 1820
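The listing ends here. As a rough illustration of the block arithmetic that nfs_write() and nfs_directio_write() rely on (lbn = offset / biosize, on = offset & (biosize - 1), n = min(biosize - on, resid)), the sketch below walks a byte range in biosize-sized chunks as a standalone userspace program. It is a minimal sketch, not part of nfs_bio.c: the function and variable names (walk_blocks, biosize, resid) are illustrative only, and it assumes biosize is a power of two, as the masking in the driver code requires.

/*
 * Illustrative sketch only -- not part of nfs_bio.c.  It mimics the
 * per-buffer chunking done in nfs_write(): each iteration computes the
 * logical block number (lbn), the offset into that block (on) and the
 * number of bytes that fit in the remainder of the block (n).
 * Assumes biosize is a power of two, as the bitmask arithmetic requires.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>

static void
walk_blocks(off_t offset, size_t resid, unsigned biosize)
{

	while (resid > 0) {
		off_t lbn = offset / biosize;		/* logical block number */
		unsigned on = offset & (biosize - 1);	/* offset within block */
		size_t n = biosize - on;		/* room left in this block */

		if (n > resid)
			n = resid;
		printf("lbn=%jd on=%u n=%zu\n", (intmax_t)lbn, on, n);
		offset += n;
		resid -= n;
	}
}

int
main(void)
{

	/* A write of 10000 bytes starting at offset 6000 with 8 KB blocks. */
	walk_blocks(6000, 10000, 8192);
	return (0);
}

Run standalone, this prints one line per chunk; the first chunk is short because the starting offset is not block-aligned, after which the loop advances block by block, mirroring the do/while structure in nfs_write() and the wsize-limited loops in nfs_directio_write().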