nfs_bio.c revision 157557
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 157557 2006-04-06 01:20:30Z mohans $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <rpc/rpcclnt.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#include <nfs4client/nfs4.h>

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
			      struct ucred *cred, int ioflag);

extern int nfs_directio_enable;
extern int nfs_directio_allow_mmap;
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if (nfs_directio_enable && !nfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) &&
	    (vp->v_type == VREG)) {
		printf("nfs_getpages: called on non-cacheable vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		/* We'll never get here for v4, because we always have fsinfo */
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		if (m->valid != 0) {
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vm_page_free(pages[i]);
			}
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			return(0);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
}

/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	if (nfs_directio_enable && !nfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG))
		printf("nfs_putpages: called on noncache-able vnode??\n");

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/ no readaheads. Just read data into the user buffer */
		return nfs_readrpc(vp, uio, cred);

	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if (np->n_flag & NMODIFIED) {
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			if (vp->v_type == VDIR)
				(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
	}
	do {
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 * The readahead is kicked off only if sequential access
		 * is detected, based on the readahead hint (ra_expect_lbn).
		 */
		if (nmp->nm_readahead > 0 && np->ra_expect_lbn == lbn) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			 (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		    np->ra_expect_lbn = lbn + 1;
		}

		/* Note that bcount is *not* DEV_BSIZE aligned. */
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = nfs_sigintr(nmp, NULL, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server.  The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuch!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(vp, bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    };

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1. So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed. The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs. This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(vp, uiop, cred, ioflag)
	struct vnode *vp;
	struct uio *uiop;
	struct ucred *cred;
	int ioflag;
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;

	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, nmp->nm_wsize);
			size = min(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSV3WRITE_FILESYNC;
			error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred,
						     &iomode, &must_commit);
			KASSERT((must_commit == 0),
				("nfs_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since the
		 * user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, nmp->nm_wsize);
			size = min(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&nfs_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			vhold(vp);
			error = nfs_asyncio(nmp, bp, NOCRED, td);
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				vdrop(bp->b_vp);
				bp->b_vp = NULL;
				relpbuf(bp, &nfs_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	struct proc *p = td?td->td_proc:NULL;

	GIANT_REQUIRED;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p != NULL) {
		PROC_LOCK(p);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			return (EFBIG);
		}
		PROC_UNLOCK(p);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int needrestart = 0;
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (np->n_flag & NMODIFIED)
				needrestart = 1;
		} else if (np->n_flag & NMODIFIED) {
			int wouldcommit = 0;
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */

		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}

		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(vp, bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * as an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		nfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		nfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");

	/*
	 * XXX This check stops us from needlessly doing a vinvalbuf when
	 * being called through vclean().  It is not clear that this is
	 * unsafe.
	 */
	if (vp->v_iflag & VI_DOOMED)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	if ((old_lock = VOP_ISLOCKED(vp, td)) != LK_EXCLUSIVE) {
		if (old_lock == LK_SHARED) {
			/* Upgrade to exclusive lock, this might block */
			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
		} else {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}

	/*
	 * Now, flush as required.
	 */
	error = vinvalbuf(vp, flags, td, slpflag, 0);
	while (error) {
		if (intrflg && (error = nfs_sigintr(nmp, NULL, td)))
			goto out;
		error = vinvalbuf(vp, flags, td, 0, slptimeo);
	}
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
out:
	if (old_lock != LK_EXCLUSIVE) {
		if (old_lock == LK_SHARED) {
			/* Downgrade from exclusive lock, this might block */
			vn_lock(vp, LK_DOWNGRADE, td);
		} else {
			VOP_UNLOCK(vp, 0, td);
		}
	}
	return error;
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < nfs_numasync; iod++)
		if (nfs_iodwant[iod]) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod) {
		iod = nfs_nfsiodnew();
		if (iod != -1)
			gotiod = TRUE;
	}

	if (gotiod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		nfs_iodwant[iod] = NULL;
		nfs_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&nfs_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
		("nfs_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
		("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = nfs_tsleep(td, &nmp->nm_bufq, slpflag | PRIBIO,
					   "nfsaio", slptimeo);
			if (error) {
				error2 = nfs_sigintr(nmp, NULL, td);
				if (error2)
					return (error2);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE)
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

void
nfs_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;
	struct nfsmount *nmp = VFSTONFS(bp->b_vp->v_mount);

	iomode = NFSV3WRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	(nmp->nm_rpcops->nr_writerpc)(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
	KASSERT((must_commit == 0), ("nfs_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);

		np->n_directio_asyncwr--;
		if ((np->n_flag & NFSYNCWAIT) && np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NFSYNCWAIT;
			wakeup((caddr_t)&np->n_directio_asyncwr);
		}
	}
	vdrop(bp->b_vp);
	bp->b_vp = NULL;
	relpbuf(bp, &nfs_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left  = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
		if (p && (vp->v_vflag & VV_TEXT) &&
		    (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.va_mtime))) {
			PROC_LOCK(p);
			killproc(p, "text file modification");
			PROC_UNLOCK(p);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
			error = nfs4_readdirrpc(vp, uiop, cr);
		else {
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
		}
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    };
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    retv = (nmp->nm_rpcops->nr_commit)(
				vp, off, bp->b_dirtyend-bp->b_dirtyoff,
				bp->b_wcred, td);
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write
	     */

	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 */
		if (error == EINTR || error == EIO || error == ETIMEDOUT
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	np->n_size = nsize;

	if (np->n_size < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return EINTR;
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return(error);
}