nfs_bio.c revision 84827
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 84827 2001-10-11 23:38:17Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

/*
 * Just call nfs_writebp() with the force argument set to 1.
 *
 * NOTE: B_DONE may or may not be set in a_bp on call.
 */
static int
nfs_bwrite(struct buf *bp)
{

	return (nfs_writebp(bp, 1, curthread));
}

struct buf_ops buf_ops_nfs = {
	"buf_ops_nfs",
	nfs_bwrite
};

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
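/*
 * Illustrative sketch (not part of the driver logic): nfs_write() below
 * stamps each NFS cache buffer with b_magic/b_op, so a later BUF_WRITE()
 * on such a buffer is expected to dispatch through this table to
 * nfs_bwrite() rather than to the default bwrite().  Roughly:
 *
 *	bp->b_magic = B_MAGIC_NFS;
 *	bp->b_op = &buf_ops_nfs;
 *	...
 *	BUF_WRITE(bp);		presumably (*bp->b_op->bop_write)(bp)
 *				== nfs_bwrite(bp)
 *				== nfs_writebp(bp, 1, curthread)
 */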
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	td = curthread;				/* XXX */
	cred = curthread->td_proc->p_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if (vp->v_object == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		if (m->valid != 0) {
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vm_page_free(pages[i]);
			}
			return(0);
		}
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = nfs_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page.
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	return 0;
}
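/*
 * Illustrative worked example (not part of the driver logic) for the
 * partial-validation loop above, assuming PAGE_SIZE == 4096.  With
 * count == 16384 (npages == 4) and a short read leaving
 * uio.uio_resid == 6384, size == 10000:
 *
 *	page 0: nextoff  4096 <= 10000	-> fully valid
 *	page 1: nextoff  8192 <= 10000	-> fully valid
 *	page 2: toff 8192 < 10000 < nextoff 12288
 *		-> vm_page_set_validclean(m, 0, 1808)
 *	page 3: toff 12288 >= 10000	-> left invalid
 */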
/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_proc->p_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = nfs_writerpc(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}
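/*
 * Illustrative worked example (not part of the driver logic) for the
 * success path above, assuming PAGE_SIZE == 4096.  With count == 20480
 * (5 pages) and a short write leaving uio.uio_resid == 4096:
 *
 *	nwritten = round_page(20480 - 4096) / PAGE_SIZE = 4
 *
 * so pages 0-3 are marked VM_PAGER_OK and undirtied, while page 4 keeps
 * its initial VM_PAGER_AGAIN and will be pushed again later.
 */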
/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp = 0, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> 16) * biosize / BKVASIZE);
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if (np->n_flag & NMODIFIED) {
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		if (np->n_mtime != vattr.va_mtime.tv_sec) {
			if (vp->v_type == VDIR)
				nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		}
	}
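	/*
	 * Illustrative worked example (not part of the driver logic) for
	 * the block arithmetic used in the loop below, assuming
	 * biosize == 8192 and uio->uio_offset == 10000:
	 *
	 *	lbn = 10000 / 8192	  = 1	 (logical block number)
	 *	on  = 10000 & (8192 - 1)  = 1808 (offset within the block)
	 *	n   = min(8192 - 1808, uio->uio_resid)
	 *
	 * so each loop iteration consumes at most the remainder of one
	 * biosize block, starting here at byte 1808 of block 1.
	 */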
	do {
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */

again:
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
		if (bcount != biosize) {
			switch(nfs_rslock(np, td)) {
			case ENOLCK:
				goto again;
				/* not reached */
			case EINTR:
			case ERESTART:
				return(EINTR);
				/* not reached */
			default:
				break;
			}
		}

		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (bcount != biosize)
			nfs_rsunlock(np, td);
		if (!bp)
			return (EINTR);

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			printf("got bad cookie vp %p bp %p\n", vp, bp);
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, 0, cred, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server.  The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp)
				return (EINTR);
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    !incore(vp, lbn + 1)) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
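/*
 * Illustrative worked example (not part of the driver logic) for the
 * VDIR chop at the end of the loop above, assuming NFS_DIRBLKSIZ == 4096.
 * With uio->uio_offset == 8192 (so on == 0), uio->uio_resid == 4096,
 * bp->b_resid == 0 and a recorded np->n_direofoffset of 10240:
 *
 *	n = lmin(4096, 4096 - 0 - 0) = 4096
 *	10240 - 8192 == 2048 < n     -> n = 2048
 *
 * so the copy stops at the recorded directory EOF even though the
 * directory buffer itself was never shortened.
 */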
/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	int haverslock = 0;
	struct proc *p = td?td->td_proc:NULL;

	GIANT_REQUIRED;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np, td)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		PROC_LOCK(p);
		psignal(p, SIGXFSZ);
		PROC_UNLOCK(p);
		if (haverslock)
			nfs_rsunlock(np, td);
		return (EFBIG);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;
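	/*
	 * Illustrative worked example (not part of the driver logic) for
	 * the direct-append case in the loop below, assuming biosize ==
	 * 8192.  A writer appending n == 1000 bytes at uio->uio_offset ==
	 * np->n_size == 10000 gets:
	 *
	 *	lbn = 10000 / 8192 = 1,  on = 10000 & 8191 = 1808
	 *
	 * The buffer is obtained at its pre-append size (bcount == on ==
	 * 1808) so any existing B_CACHE state is preserved, n_size is
	 * updated under the locked buffer, and allocbuf() then grows the
	 * buffer to bcount == 2808.
	 */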
	do {
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */

		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				bp->b_magic = B_MAGIC_NFS;
				bp->b_op = &buf_ops_nfs;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}

			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}

		if (!bp) {
			error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (!bp) {
			error = EINTR;
			break;
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (BUF_WRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}
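		/*
		 * Illustrative worked example (not part of the driver
		 * logic) for the dirty-region logic above.  Suppose the
		 * buffer already carries b_dirtyoff/b_dirtyend == 0/512:
		 *
		 *	a write at on == 256, n == 512 is contiguous, so
		 *	    the region simply grows to 0/768;
		 *	a write at on == 1024, n == 512 has on > b_dirtyend,
		 *	    so the old 0/512 region is pushed out first via
		 *	    BUF_WRITE() (goto again), after which the new
		 *	    write becomes the sole dirty region 1024/1536.
		 */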
		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = BUF_WRITE(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, td);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}
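/*
 * Illustrative worked example (not part of the driver logic) for the
 * b_blkno conversion above.  b_blkno is kept in DEV_BSIZE (512-byte)
 * units, so with f_iosize == 8192 a logical block bn maps to
 *
 *	b_blkno = bn * (8192 / 512) = bn * 16
 *
 * which nfs_doio() later converts back to a byte offset with
 * ((off_t)bp->b_blkno) * DEV_BSIZE.
 */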
/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred,
    struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VXLOCK) {
		return (0);
	}

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg &&
		    nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	while (error) {
		if (intrflg &&
		    nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, td, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}
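/*
 * Illustrative sketch (not part of the driver logic) of the
 * NFLUSHINPROG / NFLUSHWANT handshake used above, reduced to its
 * essentials:
 *
 *	while (np->n_flag & NFLUSHINPROG) {	(someone else is flushing)
 *		np->n_flag |= NFLUSHWANT;
 *		tsleep(&np->n_flag, ...);	(wait to be woken)
 *	}
 *	np->n_flag |= NFLUSHINPROG;		(become the flusher)
 *	... flush ...
 *	np->n_flag &= ~NFLUSHINPROG;		(done, or giving up)
 *	if (np->n_flag & NFLUSHWANT) {
 *		np->n_flag &= ~NFLUSHWANT;
 *		wakeup(&np->n_flag);		(release the waiters)
 *	}
 */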
/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct buf *bp, struct ucred *cred, struct thread *td)
{
	struct nfsmount *nmp;
	int i;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error;

	/*
	 * If no async daemons then return EIO to force caller to run the rpc
	 * synchronously.
	 */
	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			NFS_DPF(ASYNCIO,
			    ("nfs_asyncio: waking iod %d for mount %p\n",
			     i, nmp));
			nfs_iodwant[i] = (struct proc *)0;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("nfs_asyncio: %d iods are already processing mount %p\n",
			     nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
			    ("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
				       "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, td ? td->td_proc : NULL))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
				    ("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}
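/*
 * Illustrative sketch (not part of the driver logic) of the caller
 * contract implied by the comments above: nfs_asyncio() either queues
 * bp for an nfsiod and returns 0, or returns an error and leaves bp to
 * the caller, which can then run the RPC itself, e.g.
 *
 *	if (nfs_asyncio(bp, cred, td))
 *		error = nfs_doio(bp, cred, td);
 *
 * (the read-ahead paths in nfs_bioread() instead just invalidate and
 * release the buffer, since read-ahead is purely optional).
 */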
/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
	    /*
	     * ...though reading /dev/drum still gets us here.
	     */
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    /* mapping was done by vmapbuf() */
	    io.iov_base = bp->b_data;
	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
	    if (bp->b_iocmd == BIO_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop, cr);
	    } else {
		int com;

		iomode = NFSV3WRITE_DATASYNC;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, cr, &iomode, &com);
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop, cr);
		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left  = bp->b_bcount - nread;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
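		/*
		 * Illustrative worked example (not part of the driver
		 * logic): with b_bcount == 8192 and a short read leaving
		 * uio_resid == 3000, nread == 5192 and left == 3000, so
		 * bytes 5192..8191 of the buffer are bzero()ed and
		 * uio_resid is forced to 0, making the whole buffer appear
		 * valid to the caller.
		 */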
		if (p && (vp->v_flag & VTEXT) &&
			(np->n_mtime != np->n_vattr.va_mtime.tv_sec)) {
			uprintf("Process killed due to text file modification\n");
			PROC_LOCK(p);
			psignal(p, SIGKILL);
			_PHOLD(p);
			PROC_UNLOCK(p);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, cr);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, cr);
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit.
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    bp->b_flags |= B_WRITEINPROG;
		    retv = nfs_commit(
				bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
				bp->b_wcred, td);
		    bp->b_flags &= ~B_WRITEINPROG;
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(bp->b_vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write.
	     */

	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		bp->b_flags |= B_WRITEINPROG;
		error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
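		/*
		 * Illustrative note (not part of the driver logic): this
		 * is the first half of the two-stage NFSv3 write.  An
		 * UNSTABLE write above leaves the buffer dirty with
		 * B_NEEDCOMMIT set (bdirty() below re-marks it B_DELWRI);
		 * when the buffer is pushed again, the B_NEEDCOMMIT path
		 * near the top of this else-branch turns the push into a
		 * single nfs_commit() RPC instead of re-sending the data.
		 */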
		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused.  This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}