nfs_clvnops.c revision 276500
1/*- 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * from nfs_vnops.c 8.16 (Berkeley) 5/27/95 33 */ 34 35#include <sys/cdefs.h> 36__FBSDID("$FreeBSD: stable/10/sys/fs/nfsclient/nfs_clvnops.c 276500 2015-01-01 10:44:20Z kib $"); 37 38/* 39 * vnode op calls for Sun NFS version 2, 3 and 4 40 */ 41 42#include "opt_kdtrace.h" 43#include "opt_inet.h" 44 45#include <sys/param.h> 46#include <sys/kernel.h> 47#include <sys/systm.h> 48#include <sys/resourcevar.h> 49#include <sys/proc.h> 50#include <sys/mount.h> 51#include <sys/bio.h> 52#include <sys/buf.h> 53#include <sys/jail.h> 54#include <sys/malloc.h> 55#include <sys/mbuf.h> 56#include <sys/namei.h> 57#include <sys/socket.h> 58#include <sys/vnode.h> 59#include <sys/dirent.h> 60#include <sys/fcntl.h> 61#include <sys/lockf.h> 62#include <sys/stat.h> 63#include <sys/sysctl.h> 64#include <sys/signalvar.h> 65 66#include <vm/vm.h> 67#include <vm/vm_extern.h> 68#include <vm/vm_object.h> 69 70#include <fs/nfs/nfsport.h> 71#include <fs/nfsclient/nfsnode.h> 72#include <fs/nfsclient/nfsmount.h> 73#include <fs/nfsclient/nfs.h> 74#include <fs/nfsclient/nfs_kdtrace.h> 75 76#include <net/if.h> 77#include <netinet/in.h> 78#include <netinet/in_var.h> 79 80#include <nfs/nfs_lock.h> 81 82#ifdef KDTRACE_HOOKS 83#include <sys/dtrace_bsd.h> 84 85dtrace_nfsclient_accesscache_flush_probe_func_t 86 dtrace_nfscl_accesscache_flush_done_probe; 87uint32_t nfscl_accesscache_flush_done_id; 88 89dtrace_nfsclient_accesscache_get_probe_func_t 90 dtrace_nfscl_accesscache_get_hit_probe, 91 dtrace_nfscl_accesscache_get_miss_probe; 92uint32_t nfscl_accesscache_get_hit_id; 93uint32_t nfscl_accesscache_get_miss_id; 94 95dtrace_nfsclient_accesscache_load_probe_func_t 96 dtrace_nfscl_accesscache_load_done_probe; 97uint32_t nfscl_accesscache_load_done_id; 98#endif /* !KDTRACE_HOOKS */ 99 100/* Defs */ 101#define TRUE 1 
102#define FALSE 0 103 104extern struct nfsstats newnfsstats; 105extern int nfsrv_useacl; 106extern int nfscl_debuglevel; 107MALLOC_DECLARE(M_NEWNFSREQ); 108 109/* 110 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these 111 * calls are not in getblk() and brelse() so that they would not be necessary 112 * here. 113 */ 114#ifndef B_VMIO 115#define vfs_busy_pages(bp, f) 116#endif 117 118static vop_read_t nfsfifo_read; 119static vop_write_t nfsfifo_write; 120static vop_close_t nfsfifo_close; 121static int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *, 122 struct thread *); 123static vop_lookup_t nfs_lookup; 124static vop_create_t nfs_create; 125static vop_mknod_t nfs_mknod; 126static vop_open_t nfs_open; 127static vop_pathconf_t nfs_pathconf; 128static vop_close_t nfs_close; 129static vop_access_t nfs_access; 130static vop_getattr_t nfs_getattr; 131static vop_setattr_t nfs_setattr; 132static vop_read_t nfs_read; 133static vop_fsync_t nfs_fsync; 134static vop_remove_t nfs_remove; 135static vop_link_t nfs_link; 136static vop_rename_t nfs_rename; 137static vop_mkdir_t nfs_mkdir; 138static vop_rmdir_t nfs_rmdir; 139static vop_symlink_t nfs_symlink; 140static vop_readdir_t nfs_readdir; 141static vop_strategy_t nfs_strategy; 142static vop_lock1_t nfs_lock1; 143static int nfs_lookitup(struct vnode *, char *, int, 144 struct ucred *, struct thread *, struct nfsnode **); 145static int nfs_sillyrename(struct vnode *, struct vnode *, 146 struct componentname *); 147static vop_access_t nfsspec_access; 148static vop_readlink_t nfs_readlink; 149static vop_print_t nfs_print; 150static vop_advlock_t nfs_advlock; 151static vop_advlockasync_t nfs_advlockasync; 152static vop_getacl_t nfs_getacl; 153static vop_setacl_t nfs_setacl; 154 155/* 156 * Global vfs data structures for nfs 157 */ 158struct vop_vector newnfs_vnodeops = { 159 .vop_default = &default_vnodeops, 160 .vop_access = nfs_access, 161 .vop_advlock = nfs_advlock, 162 .vop_advlockasync = nfs_advlockasync, 163 .vop_close = nfs_close, 164 .vop_create = nfs_create, 165 .vop_fsync = nfs_fsync, 166 .vop_getattr = nfs_getattr, 167 .vop_getpages = ncl_getpages, 168 .vop_putpages = ncl_putpages, 169 .vop_inactive = ncl_inactive, 170 .vop_link = nfs_link, 171 .vop_lock1 = nfs_lock1, 172 .vop_lookup = nfs_lookup, 173 .vop_mkdir = nfs_mkdir, 174 .vop_mknod = nfs_mknod, 175 .vop_open = nfs_open, 176 .vop_pathconf = nfs_pathconf, 177 .vop_print = nfs_print, 178 .vop_read = nfs_read, 179 .vop_readdir = nfs_readdir, 180 .vop_readlink = nfs_readlink, 181 .vop_reclaim = ncl_reclaim, 182 .vop_remove = nfs_remove, 183 .vop_rename = nfs_rename, 184 .vop_rmdir = nfs_rmdir, 185 .vop_setattr = nfs_setattr, 186 .vop_strategy = nfs_strategy, 187 .vop_symlink = nfs_symlink, 188 .vop_write = ncl_write, 189 .vop_getacl = nfs_getacl, 190 .vop_setacl = nfs_setacl, 191}; 192 193struct vop_vector newnfs_fifoops = { 194 .vop_default = &fifo_specops, 195 .vop_access = nfsspec_access, 196 .vop_close = nfsfifo_close, 197 .vop_fsync = nfs_fsync, 198 .vop_getattr = nfs_getattr, 199 .vop_inactive = ncl_inactive, 200 .vop_print = nfs_print, 201 .vop_read = nfsfifo_read, 202 .vop_reclaim = ncl_reclaim, 203 .vop_setattr = nfs_setattr, 204 .vop_write = nfsfifo_write, 205}; 206 207static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, 208 struct componentname *cnp, struct vattr *vap); 209static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name, 210 int namelen, struct ucred *cred, struct thread *td); 211static int 
nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, 212 char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp, 213 char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td); 214static int nfs_renameit(struct vnode *sdvp, struct vnode *svp, 215 struct componentname *scnp, struct sillyrename *sp); 216 217/* 218 * Global variables 219 */ 220#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1)) 221 222SYSCTL_DECL(_vfs_nfs); 223 224static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO; 225SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW, 226 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout"); 227 228static int nfs_prime_access_cache = 0; 229SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW, 230 &nfs_prime_access_cache, 0, 231 "Prime NFS ACCESS cache when fetching attributes"); 232 233static int newnfs_commit_on_close = 0; 234SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW, 235 &newnfs_commit_on_close, 0, "write+commit on close, else only write"); 236 237static int nfs_clean_pages_on_close = 1; 238SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW, 239 &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close"); 240 241int newnfs_directio_enable = 0; 242SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW, 243 &newnfs_directio_enable, 0, "Enable NFS directio"); 244 245int nfs_keep_dirty_on_error; 246SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW, 247 &nfs_keep_dirty_on_error, 0, "Retry pageout if error returned"); 248 249/* 250 * This sysctl allows other processes to mmap a file that has been opened 251 * O_DIRECT by a process. In general, having processes mmap the file while 252 * Direct IO is in progress can lead to Data Inconsistencies. But, we allow 253 * this by default to prevent DoS attacks - to prevent a malicious user from 254 * opening up files O_DIRECT preventing other users from mmap'ing these 255 * files. "Protected" environments where stricter consistency guarantees are 256 * required can disable this knob. The process that opened the file O_DIRECT 257 * cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not 258 * meaningful. 259 */ 260int newnfs_directio_allow_mmap = 1; 261SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW, 262 &newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens"); 263 264#if 0 265SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD, 266 &newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count"); 267 268SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD, 269 &newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count"); 270#endif 271 272#define NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY \ 273 | NFSACCESS_EXTEND | NFSACCESS_EXECUTE \ 274 | NFSACCESS_DELETE | NFSACCESS_LOOKUP) 275 276/* 277 * SMP Locking Note : 278 * The list of locks after the description of the lock is the ordering 279 * of other locks acquired with the lock held. 280 * np->n_mtx : Protects the fields in the nfsnode. 281 VM Object Lock 282 VI_MTX (acquired indirectly) 283 * nmp->nm_mtx : Protects the fields in the nfsmount. 284 rep->r_mtx 285 * ncl_iod_mutex : Global lock, protects shared nfsiod state. 286 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list. 287 nmp->nm_mtx 288 rep->r_mtx 289 * rep->r_mtx : Protects the fields in an nfsreq. 
290 */ 291 292static int 293nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td, 294 struct ucred *cred, u_int32_t *retmode) 295{ 296 int error = 0, attrflag, i, lrupos; 297 u_int32_t rmode; 298 struct nfsnode *np = VTONFS(vp); 299 struct nfsvattr nfsva; 300 301 error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag, 302 &rmode, NULL); 303 if (attrflag) 304 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 305 if (!error) { 306 lrupos = 0; 307 mtx_lock(&np->n_mtx); 308 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) { 309 if (np->n_accesscache[i].uid == cred->cr_uid) { 310 np->n_accesscache[i].mode = rmode; 311 np->n_accesscache[i].stamp = time_second; 312 break; 313 } 314 if (i > 0 && np->n_accesscache[i].stamp < 315 np->n_accesscache[lrupos].stamp) 316 lrupos = i; 317 } 318 if (i == NFS_ACCESSCACHESIZE) { 319 np->n_accesscache[lrupos].uid = cred->cr_uid; 320 np->n_accesscache[lrupos].mode = rmode; 321 np->n_accesscache[lrupos].stamp = time_second; 322 } 323 mtx_unlock(&np->n_mtx); 324 if (retmode != NULL) 325 *retmode = rmode; 326 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0); 327 } else if (NFS_ISV4(vp)) { 328 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 329 } 330#ifdef KDTRACE_HOOKS 331 if (error != 0) 332 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0, 333 error); 334#endif 335 return (error); 336} 337 338/* 339 * nfs access vnode op. 340 * For nfs version 2, just return ok. File accesses may fail later. 341 * For nfs version 3, use the access rpc to check accessibility. If file modes 342 * are changed on the server, accesses might still fail later. 343 */ 344static int 345nfs_access(struct vop_access_args *ap) 346{ 347 struct vnode *vp = ap->a_vp; 348 int error = 0, i, gotahit; 349 u_int32_t mode, wmode, rmode; 350 int v34 = NFS_ISV34(vp); 351 struct nfsnode *np = VTONFS(vp); 352 353 /* 354 * Disallow write attempts on filesystems mounted read-only; 355 * unless the file is a socket, fifo, or a block or character 356 * device resident on the filesystem. 357 */ 358 if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS | 359 VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL | 360 VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) { 361 switch (vp->v_type) { 362 case VREG: 363 case VDIR: 364 case VLNK: 365 return (EROFS); 366 default: 367 break; 368 } 369 } 370 /* 371 * For nfs v3 or v4, check to see if we have done this recently, and if 372 * so return our cached result instead of making an ACCESS call. 373 * If not, do an access rpc, otherwise you are stuck emulating 374 * ufs_access() locally using the vattr. This may not be correct, 375 * since the server may apply other access criteria such as 376 * client uid-->server uid mapping that we do not know about. 
377 */ 378 if (v34) { 379 if (ap->a_accmode & VREAD) 380 mode = NFSACCESS_READ; 381 else 382 mode = 0; 383 if (vp->v_type != VDIR) { 384 if (ap->a_accmode & VWRITE) 385 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND); 386 if (ap->a_accmode & VAPPEND) 387 mode |= NFSACCESS_EXTEND; 388 if (ap->a_accmode & VEXEC) 389 mode |= NFSACCESS_EXECUTE; 390 if (ap->a_accmode & VDELETE) 391 mode |= NFSACCESS_DELETE; 392 } else { 393 if (ap->a_accmode & VWRITE) 394 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND); 395 if (ap->a_accmode & VAPPEND) 396 mode |= NFSACCESS_EXTEND; 397 if (ap->a_accmode & VEXEC) 398 mode |= NFSACCESS_LOOKUP; 399 if (ap->a_accmode & VDELETE) 400 mode |= NFSACCESS_DELETE; 401 if (ap->a_accmode & VDELETE_CHILD) 402 mode |= NFSACCESS_MODIFY; 403 } 404 /* XXX safety belt, only make blanket request if caching */ 405 if (nfsaccess_cache_timeout > 0) { 406 wmode = NFSACCESS_READ | NFSACCESS_MODIFY | 407 NFSACCESS_EXTEND | NFSACCESS_EXECUTE | 408 NFSACCESS_DELETE | NFSACCESS_LOOKUP; 409 } else { 410 wmode = mode; 411 } 412 413 /* 414 * Does our cached result allow us to give a definite yes to 415 * this request? 416 */ 417 gotahit = 0; 418 mtx_lock(&np->n_mtx); 419 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) { 420 if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) { 421 if (time_second < (np->n_accesscache[i].stamp 422 + nfsaccess_cache_timeout) && 423 (np->n_accesscache[i].mode & mode) == mode) { 424 NFSINCRGLOBAL(newnfsstats.accesscache_hits); 425 gotahit = 1; 426 } 427 break; 428 } 429 } 430 mtx_unlock(&np->n_mtx); 431#ifdef KDTRACE_HOOKS 432 if (gotahit != 0) 433 KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp, 434 ap->a_cred->cr_uid, mode); 435 else 436 KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp, 437 ap->a_cred->cr_uid, mode); 438#endif 439 if (gotahit == 0) { 440 /* 441 * Either a no, or a don't know. Go to the wire. 442 */ 443 NFSINCRGLOBAL(newnfsstats.accesscache_misses); 444 error = nfs34_access_otw(vp, wmode, ap->a_td, 445 ap->a_cred, &rmode); 446 if (!error && 447 (rmode & mode) != mode) 448 error = EACCES; 449 } 450 return (error); 451 } else { 452 if ((error = nfsspec_access(ap)) != 0) { 453 return (error); 454 } 455 /* 456 * Attempt to prevent a mapped root from accessing a file 457 * which it shouldn't. We try to read a byte from the file 458 * if the user is root and the file is not zero length. 459 * After calling nfsspec_access, we should have the correct 460 * file size cached. 461 */ 462 mtx_lock(&np->n_mtx); 463 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD) 464 && VTONFS(vp)->n_size > 0) { 465 struct iovec aiov; 466 struct uio auio; 467 char buf[1]; 468 469 mtx_unlock(&np->n_mtx); 470 aiov.iov_base = buf; 471 aiov.iov_len = 1; 472 auio.uio_iov = &aiov; 473 auio.uio_iovcnt = 1; 474 auio.uio_offset = 0; 475 auio.uio_resid = 1; 476 auio.uio_segflg = UIO_SYSSPACE; 477 auio.uio_rw = UIO_READ; 478 auio.uio_td = ap->a_td; 479 480 if (vp->v_type == VREG) 481 error = ncl_readrpc(vp, &auio, ap->a_cred); 482 else if (vp->v_type == VDIR) { 483 char* bp; 484 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); 485 aiov.iov_base = bp; 486 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ; 487 error = ncl_readdirrpc(vp, &auio, ap->a_cred, 488 ap->a_td); 489 free(bp, M_TEMP); 490 } else if (vp->v_type == VLNK) 491 error = ncl_readlinkrpc(vp, &auio, ap->a_cred); 492 else 493 error = EACCES; 494 } else 495 mtx_unlock(&np->n_mtx); 496 return (error); 497 } 498} 499 500 501/* 502 * nfs open vnode op 503 * Check to see if the type is ok 504 * and that deletion is not in progress. 
505 * For paged in text files, you will need to flush the page cache 506 * if consistency is lost. 507 */ 508/* ARGSUSED */ 509static int 510nfs_open(struct vop_open_args *ap) 511{ 512 struct vnode *vp = ap->a_vp; 513 struct nfsnode *np = VTONFS(vp); 514 struct vattr vattr; 515 int error; 516 int fmode = ap->a_mode; 517 struct ucred *cred; 518 519 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) 520 return (EOPNOTSUPP); 521 522 /* 523 * For NFSv4, we need to do the Open Op before cache validation, 524 * so that we conform to RFC3530 Sec. 9.3.1. 525 */ 526 if (NFS_ISV4(vp)) { 527 error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td); 528 if (error) { 529 error = nfscl_maperr(ap->a_td, error, (uid_t)0, 530 (gid_t)0); 531 return (error); 532 } 533 } 534 535 /* 536 * Now, if this Open will be doing reading, re-validate/flush the 537 * cache, so that Close/Open coherency is maintained. 538 */ 539 mtx_lock(&np->n_mtx); 540 if (np->n_flag & NMODIFIED) { 541 mtx_unlock(&np->n_mtx); 542 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); 543 if (error == EINTR || error == EIO) { 544 if (NFS_ISV4(vp)) 545 (void) nfsrpc_close(vp, 0, ap->a_td); 546 return (error); 547 } 548 mtx_lock(&np->n_mtx); 549 np->n_attrstamp = 0; 550 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 551 if (vp->v_type == VDIR) 552 np->n_direofoffset = 0; 553 mtx_unlock(&np->n_mtx); 554 error = VOP_GETATTR(vp, &vattr, ap->a_cred); 555 if (error) { 556 if (NFS_ISV4(vp)) 557 (void) nfsrpc_close(vp, 0, ap->a_td); 558 return (error); 559 } 560 mtx_lock(&np->n_mtx); 561 np->n_mtime = vattr.va_mtime; 562 if (NFS_ISV4(vp)) 563 np->n_change = vattr.va_filerev; 564 } else { 565 mtx_unlock(&np->n_mtx); 566 error = VOP_GETATTR(vp, &vattr, ap->a_cred); 567 if (error) { 568 if (NFS_ISV4(vp)) 569 (void) nfsrpc_close(vp, 0, ap->a_td); 570 return (error); 571 } 572 mtx_lock(&np->n_mtx); 573 if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) || 574 NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) { 575 if (vp->v_type == VDIR) 576 np->n_direofoffset = 0; 577 mtx_unlock(&np->n_mtx); 578 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); 579 if (error == EINTR || error == EIO) { 580 if (NFS_ISV4(vp)) 581 (void) nfsrpc_close(vp, 0, ap->a_td); 582 return (error); 583 } 584 mtx_lock(&np->n_mtx); 585 np->n_mtime = vattr.va_mtime; 586 if (NFS_ISV4(vp)) 587 np->n_change = vattr.va_filerev; 588 } 589 } 590 591 /* 592 * If the object has >= 1 O_DIRECT active opens, we disable caching. 593 */ 594 if (newnfs_directio_enable && (fmode & O_DIRECT) && 595 (vp->v_type == VREG)) { 596 if (np->n_directio_opens == 0) { 597 mtx_unlock(&np->n_mtx); 598 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); 599 if (error) { 600 if (NFS_ISV4(vp)) 601 (void) nfsrpc_close(vp, 0, ap->a_td); 602 return (error); 603 } 604 mtx_lock(&np->n_mtx); 605 np->n_flag |= NNONCACHE; 606 } 607 np->n_directio_opens++; 608 } 609 610 /* If opened for writing via NFSv4.1 or later, mark that for pNFS. */ 611 if (NFSHASPNFS(VFSTONFS(vp->v_mount)) && (fmode & FWRITE) != 0) 612 np->n_flag |= NWRITEOPENED; 613 614 /* 615 * If this is an open for writing, capture a reference to the 616 * credentials, so they can be used by ncl_putpages(). Using 617 * these write credentials is preferable to the credentials of 618 * whatever thread happens to be doing the VOP_PUTPAGES() since 619 * the write RPCs are less likely to fail with EACCES. 
620 */ 621 if ((fmode & FWRITE) != 0) { 622 cred = np->n_writecred; 623 np->n_writecred = crhold(ap->a_cred); 624 } else 625 cred = NULL; 626 mtx_unlock(&np->n_mtx); 627 628 if (cred != NULL) 629 crfree(cred); 630 vnode_create_vobject(vp, vattr.va_size, ap->a_td); 631 return (0); 632} 633 634/* 635 * nfs close vnode op 636 * What an NFS client should do upon close after writing is a debatable issue. 637 * Most NFS clients push delayed writes to the server upon close, basically for 638 * two reasons: 639 * 1 - So that any write errors may be reported back to the client process 640 * doing the close system call. By far the two most likely errors are 641 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 642 * 2 - To put a worst case upper bound on cache inconsistency between 643 * multiple clients for the file. 644 * There is also a consistency problem for Version 2 of the protocol w.r.t. 645 * not being able to tell if other clients are writing a file concurrently, 646 * since there is no way of knowing if the changed modify time in the reply 647 * is only due to the write for this client. 648 * (NFS Version 3 provides weak cache consistency data in the reply that 649 * should be sufficient to detect and handle this case.) 650 * 651 * The current code does the following: 652 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 653 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 654 * or commit them (this satisfies 1 and 2 except for the 655 * case where the server crashes after this close but 656 * before the commit RPC, which is felt to be "good 657 * enough". Changing the last argument to ncl_flush() to 658 * a 1 would force a commit operation, if it is felt a 659 * commit is necessary now. 660 * for NFS Version 4 - flush the dirty buffers and commit them, if 661 * nfscl_mustflush() says this is necessary. 662 * It is necessary if there is no write delegation held, 663 * in order to satisfy open/close coherency. 664 * If the file isn't cached on local stable storage, 665 * it may be necessary in order to detect "out of space" 666 * errors from the server, if the write delegation 667 * issued by the server doesn't allow the file to grow. 668 */ 669/* ARGSUSED */ 670static int 671nfs_close(struct vop_close_args *ap) 672{ 673 struct vnode *vp = ap->a_vp; 674 struct nfsnode *np = VTONFS(vp); 675 struct nfsvattr nfsva; 676 struct ucred *cred; 677 int error = 0, ret, localcred = 0; 678 int fmode = ap->a_fflag; 679 680 if ((vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)) 681 return (0); 682 /* 683 * During shutdown, a_cred isn't valid, so just use root. 684 */ 685 if (ap->a_cred == NOCRED) { 686 cred = newnfs_getcred(); 687 localcred = 1; 688 } else { 689 cred = ap->a_cred; 690 } 691 if (vp->v_type == VREG) { 692 /* 693 * Examine and clean dirty pages, regardless of NMODIFIED. 694 * This closes a major hole in close-to-open consistency. 695 * We want to push out all dirty pages (and buffers) on 696 * close, regardless of whether they were dirtied by 697 * mmap'ed writes or via write(). 698 */ 699 if (nfs_clean_pages_on_close && vp->v_object) { 700 VM_OBJECT_WLOCK(vp->v_object); 701 vm_object_page_clean(vp->v_object, 0, 0, 0); 702 VM_OBJECT_WUNLOCK(vp->v_object); 703 } 704 mtx_lock(&np->n_mtx); 705 if (np->n_flag & NMODIFIED) { 706 mtx_unlock(&np->n_mtx); 707 if (NFS_ISV3(vp)) { 708 /* 709 * Under NFSv3 we have dirty buffers to dispose of. We 710 * must flush them to the NFS server. 
We have the option 711 * of waiting all the way through the commit rpc or just 712 * waiting for the initial write. The default is to only 713 * wait through the initial write so the data is in the 714 * server's cache, which is roughly similar to the state 715 * a standard disk subsystem leaves the file in on close(). 716 * 717 * We cannot clear the NMODIFIED bit in np->n_flag due to 718 * potential races with other processes, and certainly 719 * cannot clear it if we don't commit. 720 * These races occur when there is no longer the old 721 * traditional vnode locking implemented for Vnode Ops. 722 */ 723 int cm = newnfs_commit_on_close ? 1 : 0; 724 error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, cm, 0); 725 /* np->n_flag &= ~NMODIFIED; */ 726 } else if (NFS_ISV4(vp)) { 727 if (nfscl_mustflush(vp) != 0) { 728 int cm = newnfs_commit_on_close ? 1 : 0; 729 error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, 730 cm, 0); 731 /* 732 * as above w.r.t races when clearing 733 * NMODIFIED. 734 * np->n_flag &= ~NMODIFIED; 735 */ 736 } 737 } else 738 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); 739 mtx_lock(&np->n_mtx); 740 } 741 /* 742 * Invalidate the attribute cache in all cases. 743 * An open is going to fetch fresh attrs any way, other procs 744 * on this node that have file open will be forced to do an 745 * otw attr fetch, but this is safe. 746 * --> A user found that their RPC count dropped by 20% when 747 * this was commented out and I can't see any requirement 748 * for it, so I've disabled it when negative lookups are 749 * enabled. (What does this have to do with negative lookup 750 * caching? Well nothing, except it was reported by the 751 * same user that needed negative lookup caching and I wanted 752 * there to be a way to disable it to see if it 753 * is the cause of some caching/coherency issue that might 754 * crop up.) 755 */ 756 if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) { 757 np->n_attrstamp = 0; 758 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 759 } 760 if (np->n_flag & NWRITEERR) { 761 np->n_flag &= ~NWRITEERR; 762 error = np->n_error; 763 } 764 mtx_unlock(&np->n_mtx); 765 } 766 767 if (NFS_ISV4(vp)) { 768 /* 769 * Get attributes so "change" is up to date. 770 */ 771 if (error == 0 && nfscl_mustflush(vp) != 0 && 772 vp->v_type == VREG && 773 (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOCTO) == 0) { 774 ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva, 775 NULL); 776 if (!ret) { 777 np->n_change = nfsva.na_filerev; 778 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, 779 NULL, 0, 0); 780 } 781 } 782 783 /* 784 * and do the close. 785 */ 786 ret = nfsrpc_close(vp, 0, ap->a_td); 787 if (!error && ret) 788 error = ret; 789 if (error) 790 error = nfscl_maperr(ap->a_td, error, (uid_t)0, 791 (gid_t)0); 792 } 793 if (newnfs_directio_enable) 794 KASSERT((np->n_directio_asyncwr == 0), 795 ("nfs_close: dirty unflushed (%d) directio buffers\n", 796 np->n_directio_asyncwr)); 797 if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) { 798 mtx_lock(&np->n_mtx); 799 KASSERT((np->n_directio_opens > 0), 800 ("nfs_close: unexpectedly value (0) of n_directio_opens\n")); 801 np->n_directio_opens--; 802 if (np->n_directio_opens == 0) 803 np->n_flag &= ~NNONCACHE; 804 mtx_unlock(&np->n_mtx); 805 } 806 if (localcred) 807 NFSFREECRED(cred); 808 return (error); 809} 810 811/* 812 * nfs getattr call from vfs. 
813 */ 814static int 815nfs_getattr(struct vop_getattr_args *ap) 816{ 817 struct vnode *vp = ap->a_vp; 818 struct thread *td = curthread; /* XXX */ 819 struct nfsnode *np = VTONFS(vp); 820 int error = 0; 821 struct nfsvattr nfsva; 822 struct vattr *vap = ap->a_vap; 823 struct vattr vattr; 824 825 /* 826 * Update local times for special files. 827 */ 828 mtx_lock(&np->n_mtx); 829 if (np->n_flag & (NACC | NUPD)) 830 np->n_flag |= NCHG; 831 mtx_unlock(&np->n_mtx); 832 /* 833 * First look in the cache. 834 */ 835 if (ncl_getattrcache(vp, &vattr) == 0) { 836 vap->va_type = vattr.va_type; 837 vap->va_mode = vattr.va_mode; 838 vap->va_nlink = vattr.va_nlink; 839 vap->va_uid = vattr.va_uid; 840 vap->va_gid = vattr.va_gid; 841 vap->va_fsid = vattr.va_fsid; 842 vap->va_fileid = vattr.va_fileid; 843 vap->va_size = vattr.va_size; 844 vap->va_blocksize = vattr.va_blocksize; 845 vap->va_atime = vattr.va_atime; 846 vap->va_mtime = vattr.va_mtime; 847 vap->va_ctime = vattr.va_ctime; 848 vap->va_gen = vattr.va_gen; 849 vap->va_flags = vattr.va_flags; 850 vap->va_rdev = vattr.va_rdev; 851 vap->va_bytes = vattr.va_bytes; 852 vap->va_filerev = vattr.va_filerev; 853 /* 854 * Get the local modify time for the case of a write 855 * delegation. 856 */ 857 nfscl_deleggetmodtime(vp, &vap->va_mtime); 858 return (0); 859 } 860 861 if (NFS_ISV34(vp) && nfs_prime_access_cache && 862 nfsaccess_cache_timeout > 0) { 863 NFSINCRGLOBAL(newnfsstats.accesscache_misses); 864 nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL); 865 if (ncl_getattrcache(vp, ap->a_vap) == 0) { 866 nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime); 867 return (0); 868 } 869 } 870 error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL); 871 if (!error) 872 error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0); 873 if (!error) { 874 /* 875 * Get the local modify time for the case of a write 876 * delegation. 877 */ 878 nfscl_deleggetmodtime(vp, &vap->va_mtime); 879 } else if (NFS_ISV4(vp)) { 880 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 881 } 882 return (error); 883} 884 885/* 886 * nfs setattr call. 887 */ 888static int 889nfs_setattr(struct vop_setattr_args *ap) 890{ 891 struct vnode *vp = ap->a_vp; 892 struct nfsnode *np = VTONFS(vp); 893 struct thread *td = curthread; /* XXX */ 894 struct vattr *vap = ap->a_vap; 895 int error = 0; 896 u_quad_t tsize; 897 898#ifndef nolint 899 tsize = (u_quad_t)0; 900#endif 901 902 /* 903 * Setting of flags and marking of atimes are not supported. 904 */ 905 if (vap->va_flags != VNOVAL) 906 return (EOPNOTSUPP); 907 908 /* 909 * Disallow write attempts if the filesystem is mounted read-only. 910 */ 911 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || 912 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 913 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 914 (vp->v_mount->mnt_flag & MNT_RDONLY)) 915 return (EROFS); 916 if (vap->va_size != VNOVAL) { 917 switch (vp->v_type) { 918 case VDIR: 919 return (EISDIR); 920 case VCHR: 921 case VBLK: 922 case VSOCK: 923 case VFIFO: 924 if (vap->va_mtime.tv_sec == VNOVAL && 925 vap->va_atime.tv_sec == VNOVAL && 926 vap->va_mode == (mode_t)VNOVAL && 927 vap->va_uid == (uid_t)VNOVAL && 928 vap->va_gid == (gid_t)VNOVAL) 929 return (0); 930 vap->va_size = VNOVAL; 931 break; 932 default: 933 /* 934 * Disallow write attempts if the filesystem is 935 * mounted read-only. 
936 */ 937 if (vp->v_mount->mnt_flag & MNT_RDONLY) 938 return (EROFS); 939 /* 940 * We run vnode_pager_setsize() early (why?), 941 * we must set np->n_size now to avoid vinvalbuf 942 * V_SAVE races that might setsize a lower 943 * value. 944 */ 945 mtx_lock(&np->n_mtx); 946 tsize = np->n_size; 947 mtx_unlock(&np->n_mtx); 948 error = ncl_meta_setsize(vp, ap->a_cred, td, 949 vap->va_size); 950 mtx_lock(&np->n_mtx); 951 if (np->n_flag & NMODIFIED) { 952 tsize = np->n_size; 953 mtx_unlock(&np->n_mtx); 954 if (vap->va_size == 0) 955 error = ncl_vinvalbuf(vp, 0, td, 1); 956 else 957 error = ncl_vinvalbuf(vp, V_SAVE, td, 1); 958 if (error) { 959 vnode_pager_setsize(vp, tsize); 960 return (error); 961 } 962 /* 963 * Call nfscl_delegmodtime() to set the modify time 964 * locally, as required. 965 */ 966 nfscl_delegmodtime(vp); 967 } else 968 mtx_unlock(&np->n_mtx); 969 /* 970 * np->n_size has already been set to vap->va_size 971 * in ncl_meta_setsize(). We must set it again since 972 * nfs_loadattrcache() could be called through 973 * ncl_meta_setsize() and could modify np->n_size. 974 */ 975 mtx_lock(&np->n_mtx); 976 np->n_vattr.na_size = np->n_size = vap->va_size; 977 mtx_unlock(&np->n_mtx); 978 }; 979 } else { 980 mtx_lock(&np->n_mtx); 981 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && 982 (np->n_flag & NMODIFIED) && vp->v_type == VREG) { 983 mtx_unlock(&np->n_mtx); 984 if ((error = ncl_vinvalbuf(vp, V_SAVE, td, 1)) != 0 && 985 (error == EINTR || error == EIO)) 986 return (error); 987 } else 988 mtx_unlock(&np->n_mtx); 989 } 990 error = nfs_setattrrpc(vp, vap, ap->a_cred, td); 991 if (error && vap->va_size != VNOVAL) { 992 mtx_lock(&np->n_mtx); 993 np->n_size = np->n_vattr.na_size = tsize; 994 vnode_pager_setsize(vp, tsize); 995 mtx_unlock(&np->n_mtx); 996 } 997 return (error); 998} 999 1000/* 1001 * Do an nfs setattr rpc. 1002 */ 1003static int 1004nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred, 1005 struct thread *td) 1006{ 1007 struct nfsnode *np = VTONFS(vp); 1008 int error, ret, attrflag, i; 1009 struct nfsvattr nfsva; 1010 1011 if (NFS_ISV34(vp)) { 1012 mtx_lock(&np->n_mtx); 1013 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) 1014 np->n_accesscache[i].stamp = 0; 1015 np->n_flag |= NDELEGMOD; 1016 mtx_unlock(&np->n_mtx); 1017 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp); 1018 } 1019 error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag, 1020 NULL); 1021 if (attrflag) { 1022 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1023 if (ret && !error) 1024 error = ret; 1025 } 1026 if (error && NFS_ISV4(vp)) 1027 error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid); 1028 return (error); 1029} 1030 1031/* 1032 * nfs lookup call, one step at a time... 
1033 * First look in cache 1034 * If not found, unlock the directory nfsnode and do the rpc 1035 */ 1036static int 1037nfs_lookup(struct vop_lookup_args *ap) 1038{ 1039 struct componentname *cnp = ap->a_cnp; 1040 struct vnode *dvp = ap->a_dvp; 1041 struct vnode **vpp = ap->a_vpp; 1042 struct mount *mp = dvp->v_mount; 1043 int flags = cnp->cn_flags; 1044 struct vnode *newvp; 1045 struct nfsmount *nmp; 1046 struct nfsnode *np, *newnp; 1047 int error = 0, attrflag, dattrflag, ltype, ncticks; 1048 struct thread *td = cnp->cn_thread; 1049 struct nfsfh *nfhp; 1050 struct nfsvattr dnfsva, nfsva; 1051 struct vattr vattr; 1052 struct timespec nctime; 1053 1054 *vpp = NULLVP; 1055 if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) && 1056 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 1057 return (EROFS); 1058 if (dvp->v_type != VDIR) 1059 return (ENOTDIR); 1060 nmp = VFSTONFS(mp); 1061 np = VTONFS(dvp); 1062 1063 /* For NFSv4, wait until any remove is done. */ 1064 mtx_lock(&np->n_mtx); 1065 while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) { 1066 np->n_flag |= NREMOVEWANT; 1067 (void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0); 1068 } 1069 mtx_unlock(&np->n_mtx); 1070 1071 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) 1072 return (error); 1073 error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks); 1074 if (error > 0 && error != ENOENT) 1075 return (error); 1076 if (error == -1) { 1077 /* 1078 * Lookups of "." are special and always return the 1079 * current directory. cache_lookup() already handles 1080 * associated locking bookkeeping, etc. 1081 */ 1082 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') { 1083 /* XXX: Is this really correct? */ 1084 if (cnp->cn_nameiop != LOOKUP && 1085 (flags & ISLASTCN)) 1086 cnp->cn_flags |= SAVENAME; 1087 return (0); 1088 } 1089 1090 /* 1091 * We only accept a positive hit in the cache if the 1092 * change time of the file matches our cached copy. 1093 * Otherwise, we discard the cache entry and fallback 1094 * to doing a lookup RPC. We also only trust cache 1095 * entries for less than nm_nametimeo seconds. 1096 * 1097 * To better handle stale file handles and attributes, 1098 * clear the attribute cache of this node if it is a 1099 * leaf component, part of an open() call, and not 1100 * locally modified before fetching the attributes. 1101 * This should allow stale file handles to be detected 1102 * here where we can fall back to a LOOKUP RPC to 1103 * recover rather than having nfs_open() detect the 1104 * stale file handle and failing open(2) with ESTALE. 
1105 */ 1106 newvp = *vpp; 1107 newnp = VTONFS(newvp); 1108 if (!(nmp->nm_flag & NFSMNT_NOCTO) && 1109 (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) && 1110 !(newnp->n_flag & NMODIFIED)) { 1111 mtx_lock(&newnp->n_mtx); 1112 newnp->n_attrstamp = 0; 1113 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp); 1114 mtx_unlock(&newnp->n_mtx); 1115 } 1116 if (nfscl_nodeleg(newvp, 0) == 0 || 1117 ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) && 1118 VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 && 1119 timespeccmp(&vattr.va_ctime, &nctime, ==))) { 1120 NFSINCRGLOBAL(newnfsstats.lookupcache_hits); 1121 if (cnp->cn_nameiop != LOOKUP && 1122 (flags & ISLASTCN)) 1123 cnp->cn_flags |= SAVENAME; 1124 return (0); 1125 } 1126 cache_purge(newvp); 1127 if (dvp != newvp) 1128 vput(newvp); 1129 else 1130 vrele(newvp); 1131 *vpp = NULLVP; 1132 } else if (error == ENOENT) { 1133 if (dvp->v_iflag & VI_DOOMED) 1134 return (ENOENT); 1135 /* 1136 * We only accept a negative hit in the cache if the 1137 * modification time of the parent directory matches 1138 * the cached copy in the name cache entry. 1139 * Otherwise, we discard all of the negative cache 1140 * entries for this directory. We also only trust 1141 * negative cache entries for up to nm_negnametimeo 1142 * seconds. 1143 */ 1144 if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) && 1145 VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 && 1146 timespeccmp(&vattr.va_mtime, &nctime, ==)) { 1147 NFSINCRGLOBAL(newnfsstats.lookupcache_hits); 1148 return (ENOENT); 1149 } 1150 cache_purge_negative(dvp); 1151 } 1152 1153 error = 0; 1154 newvp = NULLVP; 1155 NFSINCRGLOBAL(newnfsstats.lookupcache_misses); 1156 error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 1157 cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, 1158 NULL); 1159 if (dattrflag) 1160 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1161 if (error) { 1162 if (newvp != NULLVP) { 1163 vput(newvp); 1164 *vpp = NULLVP; 1165 } 1166 1167 if (error != ENOENT) { 1168 if (NFS_ISV4(dvp)) 1169 error = nfscl_maperr(td, error, (uid_t)0, 1170 (gid_t)0); 1171 return (error); 1172 } 1173 1174 /* The requested file was not found. */ 1175 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 1176 (flags & ISLASTCN)) { 1177 /* 1178 * XXX: UFS does a full VOP_ACCESS(dvp, 1179 * VWRITE) here instead of just checking 1180 * MNT_RDONLY. 1181 */ 1182 if (mp->mnt_flag & MNT_RDONLY) 1183 return (EROFS); 1184 cnp->cn_flags |= SAVENAME; 1185 return (EJUSTRETURN); 1186 } 1187 1188 if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag) { 1189 /* 1190 * Cache the modification time of the parent 1191 * directory from the post-op attributes in 1192 * the name cache entry. The negative cache 1193 * entry will be ignored once the directory 1194 * has changed. Don't bother adding the entry 1195 * if the directory has already changed. 1196 */ 1197 mtx_lock(&np->n_mtx); 1198 if (timespeccmp(&np->n_vattr.na_mtime, 1199 &dnfsva.na_mtime, ==)) { 1200 mtx_unlock(&np->n_mtx); 1201 cache_enter_time(dvp, NULL, cnp, 1202 &dnfsva.na_mtime, NULL); 1203 } else 1204 mtx_unlock(&np->n_mtx); 1205 } 1206 return (ENOENT); 1207 } 1208 1209 /* 1210 * Handle RENAME case... 
1211 */ 1212 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) { 1213 if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) { 1214 FREE((caddr_t)nfhp, M_NFSFH); 1215 return (EISDIR); 1216 } 1217 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL, 1218 LK_EXCLUSIVE); 1219 if (error) 1220 return (error); 1221 newvp = NFSTOV(np); 1222 if (attrflag) 1223 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1224 0, 1); 1225 *vpp = newvp; 1226 cnp->cn_flags |= SAVENAME; 1227 return (0); 1228 } 1229 1230 if (flags & ISDOTDOT) { 1231 ltype = NFSVOPISLOCKED(dvp); 1232 error = vfs_busy(mp, MBF_NOWAIT); 1233 if (error != 0) { 1234 vfs_ref(mp); 1235 NFSVOPUNLOCK(dvp, 0); 1236 error = vfs_busy(mp, 0); 1237 NFSVOPLOCK(dvp, ltype | LK_RETRY); 1238 vfs_rel(mp); 1239 if (error == 0 && (dvp->v_iflag & VI_DOOMED)) { 1240 vfs_unbusy(mp); 1241 error = ENOENT; 1242 } 1243 if (error != 0) 1244 return (error); 1245 } 1246 NFSVOPUNLOCK(dvp, 0); 1247 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL, 1248 cnp->cn_lkflags); 1249 if (error == 0) 1250 newvp = NFSTOV(np); 1251 vfs_unbusy(mp); 1252 if (newvp != dvp) 1253 NFSVOPLOCK(dvp, ltype | LK_RETRY); 1254 if (dvp->v_iflag & VI_DOOMED) { 1255 if (error == 0) { 1256 if (newvp == dvp) 1257 vrele(newvp); 1258 else 1259 vput(newvp); 1260 } 1261 error = ENOENT; 1262 } 1263 if (error != 0) 1264 return (error); 1265 if (attrflag) 1266 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1267 0, 1); 1268 } else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) { 1269 FREE((caddr_t)nfhp, M_NFSFH); 1270 VREF(dvp); 1271 newvp = dvp; 1272 if (attrflag) 1273 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1274 0, 1); 1275 } else { 1276 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL, 1277 cnp->cn_lkflags); 1278 if (error) 1279 return (error); 1280 newvp = NFSTOV(np); 1281 if (attrflag) 1282 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1283 0, 1); 1284 else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) && 1285 !(np->n_flag & NMODIFIED)) { 1286 /* 1287 * Flush the attribute cache when opening a 1288 * leaf node to ensure that fresh attributes 1289 * are fetched in nfs_open() since we did not 1290 * fetch attributes from the LOOKUP reply. 1291 */ 1292 mtx_lock(&np->n_mtx); 1293 np->n_attrstamp = 0; 1294 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp); 1295 mtx_unlock(&np->n_mtx); 1296 } 1297 } 1298 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 1299 cnp->cn_flags |= SAVENAME; 1300 if ((cnp->cn_flags & MAKEENTRY) && 1301 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) && 1302 attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0)) 1303 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, 1304 newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime); 1305 *vpp = newvp; 1306 return (0); 1307} 1308 1309/* 1310 * nfs read call. 1311 * Just call ncl_bioread() to do the work. 1312 */ 1313static int 1314nfs_read(struct vop_read_args *ap) 1315{ 1316 struct vnode *vp = ap->a_vp; 1317 1318 switch (vp->v_type) { 1319 case VREG: 1320 return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); 1321 case VDIR: 1322 return (EISDIR); 1323 default: 1324 return (EOPNOTSUPP); 1325 } 1326} 1327 1328/* 1329 * nfs readlink call 1330 */ 1331static int 1332nfs_readlink(struct vop_readlink_args *ap) 1333{ 1334 struct vnode *vp = ap->a_vp; 1335 1336 if (vp->v_type != VLNK) 1337 return (EINVAL); 1338 return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred)); 1339} 1340 1341/* 1342 * Do a readlink rpc. 1343 * Called by ncl_doio() from below the buffer cache. 
1344 */ 1345int 1346ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1347{ 1348 int error, ret, attrflag; 1349 struct nfsvattr nfsva; 1350 1351 error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva, 1352 &attrflag, NULL); 1353 if (attrflag) { 1354 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1355 if (ret && !error) 1356 error = ret; 1357 } 1358 if (error && NFS_ISV4(vp)) 1359 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1360 return (error); 1361} 1362 1363/* 1364 * nfs read rpc call 1365 * Ditto above 1366 */ 1367int 1368ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1369{ 1370 int error, ret, attrflag; 1371 struct nfsvattr nfsva; 1372 struct nfsmount *nmp; 1373 1374 nmp = VFSTONFS(vnode_mount(vp)); 1375 error = EIO; 1376 attrflag = 0; 1377 if (NFSHASPNFS(nmp)) 1378 error = nfscl_doiods(vp, uiop, NULL, NULL, 1379 NFSV4OPEN_ACCESSREAD, cred, uiop->uio_td); 1380 NFSCL_DEBUG(4, "readrpc: aft doiods=%d\n", error); 1381 if (error != 0) 1382 error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva, 1383 &attrflag, NULL); 1384 if (attrflag) { 1385 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1386 if (ret && !error) 1387 error = ret; 1388 } 1389 if (error && NFS_ISV4(vp)) 1390 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1391 return (error); 1392} 1393 1394/* 1395 * nfs write call 1396 */ 1397int 1398ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 1399 int *iomode, int *must_commit, int called_from_strategy) 1400{ 1401 struct nfsvattr nfsva; 1402 int error, attrflag, ret; 1403 struct nfsmount *nmp; 1404 1405 nmp = VFSTONFS(vnode_mount(vp)); 1406 error = EIO; 1407 attrflag = 0; 1408 if (NFSHASPNFS(nmp)) 1409 error = nfscl_doiods(vp, uiop, iomode, must_commit, 1410 NFSV4OPEN_ACCESSWRITE, cred, uiop->uio_td); 1411 NFSCL_DEBUG(4, "writerpc: aft doiods=%d\n", error); 1412 if (error != 0) 1413 error = nfsrpc_write(vp, uiop, iomode, must_commit, cred, 1414 uiop->uio_td, &nfsva, &attrflag, NULL, 1415 called_from_strategy); 1416 if (attrflag) { 1417 if (VTONFS(vp)->n_flag & ND_NFSV4) 1418 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1, 1419 1); 1420 else 1421 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1422 1); 1423 if (ret && !error) 1424 error = ret; 1425 } 1426 if (DOINGASYNC(vp)) 1427 *iomode = NFSWRITE_FILESYNC; 1428 if (error && NFS_ISV4(vp)) 1429 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1430 return (error); 1431} 1432 1433/* 1434 * nfs mknod rpc 1435 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1436 * mode set to specify the file type and the size field for rdev. 
1437 */ 1438static int 1439nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1440 struct vattr *vap) 1441{ 1442 struct nfsvattr nfsva, dnfsva; 1443 struct vnode *newvp = NULL; 1444 struct nfsnode *np = NULL, *dnp; 1445 struct nfsfh *nfhp; 1446 struct vattr vattr; 1447 int error = 0, attrflag, dattrflag; 1448 u_int32_t rdev; 1449 1450 if (vap->va_type == VCHR || vap->va_type == VBLK) 1451 rdev = vap->va_rdev; 1452 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1453 rdev = 0xffffffff; 1454 else 1455 return (EOPNOTSUPP); 1456 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred))) 1457 return (error); 1458 error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap, 1459 rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva, 1460 &nfsva, &nfhp, &attrflag, &dattrflag, NULL); 1461 if (!error) { 1462 if (!nfhp) 1463 (void) nfsrpc_lookup(dvp, cnp->cn_nameptr, 1464 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, 1465 &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, 1466 NULL); 1467 if (nfhp) 1468 error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, 1469 cnp->cn_thread, &np, NULL, LK_EXCLUSIVE); 1470 } 1471 if (dattrflag) 1472 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1473 if (!error) { 1474 newvp = NFSTOV(np); 1475 if (attrflag != 0) { 1476 error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1477 0, 1); 1478 if (error != 0) 1479 vput(newvp); 1480 } 1481 } 1482 if (!error) { 1483 *vpp = newvp; 1484 } else if (NFS_ISV4(dvp)) { 1485 error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid, 1486 vap->va_gid); 1487 } 1488 dnp = VTONFS(dvp); 1489 mtx_lock(&dnp->n_mtx); 1490 dnp->n_flag |= NMODIFIED; 1491 if (!dattrflag) { 1492 dnp->n_attrstamp = 0; 1493 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 1494 } 1495 mtx_unlock(&dnp->n_mtx); 1496 return (error); 1497} 1498 1499/* 1500 * nfs mknod vop 1501 * just call nfs_mknodrpc() to do the work. 1502 */ 1503/* ARGSUSED */ 1504static int 1505nfs_mknod(struct vop_mknod_args *ap) 1506{ 1507 return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap)); 1508} 1509 1510static struct mtx nfs_cverf_mtx; 1511MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex", 1512 MTX_DEF); 1513 1514static nfsquad_t 1515nfs_get_cverf(void) 1516{ 1517 static nfsquad_t cverf; 1518 nfsquad_t ret; 1519 static int cverf_initialized = 0; 1520 1521 mtx_lock(&nfs_cverf_mtx); 1522 if (cverf_initialized == 0) { 1523 cverf.lval[0] = arc4random(); 1524 cverf.lval[1] = arc4random(); 1525 cverf_initialized = 1; 1526 } else 1527 cverf.qval++; 1528 ret = cverf; 1529 mtx_unlock(&nfs_cverf_mtx); 1530 1531 return (ret); 1532} 1533 1534/* 1535 * nfs file create call 1536 */ 1537static int 1538nfs_create(struct vop_create_args *ap) 1539{ 1540 struct vnode *dvp = ap->a_dvp; 1541 struct vattr *vap = ap->a_vap; 1542 struct componentname *cnp = ap->a_cnp; 1543 struct nfsnode *np = NULL, *dnp; 1544 struct vnode *newvp = NULL; 1545 struct nfsmount *nmp; 1546 struct nfsvattr dnfsva, nfsva; 1547 struct nfsfh *nfhp; 1548 nfsquad_t cverf; 1549 int error = 0, attrflag, dattrflag, fmode = 0; 1550 struct vattr vattr; 1551 1552 /* 1553 * Oops, not for me.. 1554 */ 1555 if (vap->va_type == VSOCK) 1556 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1557 1558 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred))) 1559 return (error); 1560 if (vap->va_vaflags & VA_EXCLUSIVE) 1561 fmode |= O_EXCL; 1562 dnp = VTONFS(dvp); 1563 nmp = VFSTONFS(vnode_mount(dvp)); 1564again: 1565 /* For NFSv4, wait until any remove is done. 
*/ 1566 mtx_lock(&dnp->n_mtx); 1567 while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) { 1568 dnp->n_flag |= NREMOVEWANT; 1569 (void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0); 1570 } 1571 mtx_unlock(&dnp->n_mtx); 1572 1573 cverf = nfs_get_cverf(); 1574 error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen, 1575 vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, 1576 &nfhp, &attrflag, &dattrflag, NULL); 1577 if (!error) { 1578 if (nfhp == NULL) 1579 (void) nfsrpc_lookup(dvp, cnp->cn_nameptr, 1580 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, 1581 &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, 1582 NULL); 1583 if (nfhp != NULL) 1584 error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, 1585 cnp->cn_thread, &np, NULL, LK_EXCLUSIVE); 1586 } 1587 if (dattrflag) 1588 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1589 if (!error) { 1590 newvp = NFSTOV(np); 1591 if (attrflag == 0) 1592 error = nfsrpc_getattr(newvp, cnp->cn_cred, 1593 cnp->cn_thread, &nfsva, NULL); 1594 if (error == 0) 1595 error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1596 0, 1); 1597 } 1598 if (error) { 1599 if (newvp != NULL) { 1600 vput(newvp); 1601 newvp = NULL; 1602 } 1603 if (NFS_ISV34(dvp) && (fmode & O_EXCL) && 1604 error == NFSERR_NOTSUPP) { 1605 fmode &= ~O_EXCL; 1606 goto again; 1607 } 1608 } else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) { 1609 if (nfscl_checksattr(vap, &nfsva)) { 1610 error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred, 1611 cnp->cn_thread, &nfsva, &attrflag, NULL); 1612 if (error && (vap->va_uid != (uid_t)VNOVAL || 1613 vap->va_gid != (gid_t)VNOVAL)) { 1614 /* try again without setting uid/gid */ 1615 vap->va_uid = (uid_t)VNOVAL; 1616 vap->va_gid = (uid_t)VNOVAL; 1617 error = nfsrpc_setattr(newvp, vap, NULL, 1618 cnp->cn_cred, cnp->cn_thread, &nfsva, 1619 &attrflag, NULL); 1620 } 1621 if (attrflag) 1622 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 1623 NULL, 0, 1); 1624 if (error != 0) 1625 vput(newvp); 1626 } 1627 } 1628 if (!error) { 1629 if ((cnp->cn_flags & MAKEENTRY) && attrflag) 1630 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, 1631 NULL); 1632 *ap->a_vpp = newvp; 1633 } else if (NFS_ISV4(dvp)) { 1634 error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid, 1635 vap->va_gid); 1636 } 1637 mtx_lock(&dnp->n_mtx); 1638 dnp->n_flag |= NMODIFIED; 1639 if (!dattrflag) { 1640 dnp->n_attrstamp = 0; 1641 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 1642 } 1643 mtx_unlock(&dnp->n_mtx); 1644 return (error); 1645} 1646 1647/* 1648 * nfs file remove call 1649 * To try and make nfs semantics closer to ufs semantics, a file that has 1650 * other processes using the vnode is renamed instead of removed and then 1651 * removed later on the last close. 
1652 * - If v_usecount > 1 1653 * If a rename is not already in the works 1654 * call nfs_sillyrename() to set it up 1655 * else 1656 * do the remove rpc 1657 */ 1658static int 1659nfs_remove(struct vop_remove_args *ap) 1660{ 1661 struct vnode *vp = ap->a_vp; 1662 struct vnode *dvp = ap->a_dvp; 1663 struct componentname *cnp = ap->a_cnp; 1664 struct nfsnode *np = VTONFS(vp); 1665 int error = 0; 1666 struct vattr vattr; 1667 1668 KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name")); 1669 KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount")); 1670 if (vp->v_type == VDIR) 1671 error = EPERM; 1672 else if (vrefcnt(vp) == 1 || (np->n_sillyrename && 1673 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 && 1674 vattr.va_nlink > 1)) { 1675 /* 1676 * Purge the name cache so that the chance of a lookup for 1677 * the name succeeding while the remove is in progress is 1678 * minimized. Without node locking it can still happen, such 1679 * that an I/O op returns ESTALE, but since you get this if 1680 * another host removes the file.. 1681 */ 1682 cache_purge(vp); 1683 /* 1684 * throw away biocache buffers, mainly to avoid 1685 * unnecessary delayed writes later. 1686 */ 1687 error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1); 1688 /* Do the rpc */ 1689 if (error != EINTR && error != EIO) 1690 error = nfs_removerpc(dvp, vp, cnp->cn_nameptr, 1691 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread); 1692 /* 1693 * Kludge City: If the first reply to the remove rpc is lost.. 1694 * the reply to the retransmitted request will be ENOENT 1695 * since the file was in fact removed 1696 * Therefore, we cheat and return success. 1697 */ 1698 if (error == ENOENT) 1699 error = 0; 1700 } else if (!np->n_sillyrename) 1701 error = nfs_sillyrename(dvp, vp, cnp); 1702 mtx_lock(&np->n_mtx); 1703 np->n_attrstamp = 0; 1704 mtx_unlock(&np->n_mtx); 1705 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 1706 return (error); 1707} 1708 1709/* 1710 * nfs file remove rpc called from nfs_inactive 1711 */ 1712int 1713ncl_removeit(struct sillyrename *sp, struct vnode *vp) 1714{ 1715 /* 1716 * Make sure that the directory vnode is still valid. 1717 * XXX we should lock sp->s_dvp here. 1718 */ 1719 if (sp->s_dvp->v_type == VBAD) 1720 return (0); 1721 return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen, 1722 sp->s_cred, NULL)); 1723} 1724 1725/* 1726 * Nfs remove rpc, called from nfs_remove() and ncl_removeit(). 
1727 */ 1728static int 1729nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name, 1730 int namelen, struct ucred *cred, struct thread *td) 1731{ 1732 struct nfsvattr dnfsva; 1733 struct nfsnode *dnp = VTONFS(dvp); 1734 int error = 0, dattrflag; 1735 1736 mtx_lock(&dnp->n_mtx); 1737 dnp->n_flag |= NREMOVEINPROG; 1738 mtx_unlock(&dnp->n_mtx); 1739 error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva, 1740 &dattrflag, NULL); 1741 mtx_lock(&dnp->n_mtx); 1742 if ((dnp->n_flag & NREMOVEWANT)) { 1743 dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG); 1744 mtx_unlock(&dnp->n_mtx); 1745 wakeup((caddr_t)dnp); 1746 } else { 1747 dnp->n_flag &= ~NREMOVEINPROG; 1748 mtx_unlock(&dnp->n_mtx); 1749 } 1750 if (dattrflag) 1751 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1752 mtx_lock(&dnp->n_mtx); 1753 dnp->n_flag |= NMODIFIED; 1754 if (!dattrflag) { 1755 dnp->n_attrstamp = 0; 1756 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 1757 } 1758 mtx_unlock(&dnp->n_mtx); 1759 if (error && NFS_ISV4(dvp)) 1760 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 1761 return (error); 1762} 1763 1764/* 1765 * nfs file rename call 1766 */ 1767static int 1768nfs_rename(struct vop_rename_args *ap) 1769{ 1770 struct vnode *fvp = ap->a_fvp; 1771 struct vnode *tvp = ap->a_tvp; 1772 struct vnode *fdvp = ap->a_fdvp; 1773 struct vnode *tdvp = ap->a_tdvp; 1774 struct componentname *tcnp = ap->a_tcnp; 1775 struct componentname *fcnp = ap->a_fcnp; 1776 struct nfsnode *fnp = VTONFS(ap->a_fvp); 1777 struct nfsnode *tdnp = VTONFS(ap->a_tdvp); 1778 struct nfsv4node *newv4 = NULL; 1779 int error; 1780 1781 KASSERT((tcnp->cn_flags & HASBUF) != 0 && 1782 (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name")); 1783 /* Check for cross-device rename */ 1784 if ((fvp->v_mount != tdvp->v_mount) || 1785 (tvp && (fvp->v_mount != tvp->v_mount))) { 1786 error = EXDEV; 1787 goto out; 1788 } 1789 1790 if (fvp == tvp) { 1791 ncl_printf("nfs_rename: fvp == tvp (can't happen)\n"); 1792 error = 0; 1793 goto out; 1794 } 1795 if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0) 1796 goto out; 1797 1798 /* 1799 * We have to flush B_DELWRI data prior to renaming 1800 * the file. If we don't, the delayed-write buffers 1801 * can be flushed out later after the file has gone stale 1802 * under NFSV3. NFSV2 does not have this problem because 1803 * ( as far as I can tell ) it flushes dirty buffers more 1804 * often. 1805 * 1806 * Skip the rename operation if the fsync fails, this can happen 1807 * due to the server's volume being full, when we pushed out data 1808 * that was written back to our cache earlier. Not checking for 1809 * this condition can result in potential (silent) data loss. 1810 */ 1811 error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread); 1812 NFSVOPUNLOCK(fvp, 0); 1813 if (!error && tvp) 1814 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread); 1815 if (error) 1816 goto out; 1817 1818 /* 1819 * If the tvp exists and is in use, sillyrename it before doing the 1820 * rename of the new file over it. 1821 * XXX Can't sillyrename a directory. 
1822 */ 1823 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename && 1824 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1825 vput(tvp); 1826 tvp = NULL; 1827 } 1828 1829 error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1830 tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1831 tcnp->cn_thread); 1832 1833 if (error == 0 && NFS_ISV4(tdvp)) { 1834 /* 1835 * For NFSv4, check to see if it is the same name and 1836 * replace the name, if it is different. 1837 */ 1838 MALLOC(newv4, struct nfsv4node *, 1839 sizeof (struct nfsv4node) + 1840 tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1, 1841 M_NFSV4NODE, M_WAITOK); 1842 mtx_lock(&tdnp->n_mtx); 1843 mtx_lock(&fnp->n_mtx); 1844 if (fnp->n_v4 != NULL && fvp->v_type == VREG && 1845 (fnp->n_v4->n4_namelen != tcnp->cn_namelen || 1846 NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4), 1847 tcnp->cn_namelen) || 1848 tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen || 1849 NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data, 1850 tdnp->n_fhp->nfh_len))) { 1851#ifdef notdef 1852{ char nnn[100]; int nnnl; 1853nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99; 1854bcopy(tcnp->cn_nameptr, nnn, nnnl); 1855nnn[nnnl] = '\0'; 1856printf("ren replace=%s\n",nnn); 1857} 1858#endif 1859 FREE((caddr_t)fnp->n_v4, M_NFSV4NODE); 1860 fnp->n_v4 = newv4; 1861 newv4 = NULL; 1862 fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len; 1863 fnp->n_v4->n4_namelen = tcnp->cn_namelen; 1864 NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data, 1865 tdnp->n_fhp->nfh_len); 1866 NFSBCOPY(tcnp->cn_nameptr, 1867 NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen); 1868 } 1869 mtx_unlock(&tdnp->n_mtx); 1870 mtx_unlock(&fnp->n_mtx); 1871 if (newv4 != NULL) 1872 FREE((caddr_t)newv4, M_NFSV4NODE); 1873 } 1874 1875 if (fvp->v_type == VDIR) { 1876 if (tvp != NULL && tvp->v_type == VDIR) 1877 cache_purge(tdvp); 1878 cache_purge(fdvp); 1879 } 1880 1881out: 1882 if (tdvp == tvp) 1883 vrele(tdvp); 1884 else 1885 vput(tdvp); 1886 if (tvp) 1887 vput(tvp); 1888 vrele(fdvp); 1889 vrele(fvp); 1890 /* 1891 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1892 */ 1893 if (error == ENOENT) 1894 error = 0; 1895 return (error); 1896} 1897 1898/* 1899 * nfs file rename rpc called from nfs_remove() above 1900 */ 1901static int 1902nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp, 1903 struct sillyrename *sp) 1904{ 1905 1906 return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen, 1907 sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred, 1908 scnp->cn_thread)); 1909} 1910 1911/* 1912 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
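 * Both the source and target directories are marked NMODIFIED afterwards
 * and their attribute caches are refreshed from the post-op attributes
 * when the server supplied them, or flushed when it did not.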
1913 */ 1914static int 1915nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr, 1916 int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr, 1917 int tnamelen, struct ucred *cred, struct thread *td) 1918{ 1919 struct nfsvattr fnfsva, tnfsva; 1920 struct nfsnode *fdnp = VTONFS(fdvp); 1921 struct nfsnode *tdnp = VTONFS(tdvp); 1922 int error = 0, fattrflag, tattrflag; 1923 1924 error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp, 1925 tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag, 1926 &tattrflag, NULL, NULL); 1927 mtx_lock(&fdnp->n_mtx); 1928 fdnp->n_flag |= NMODIFIED; 1929 if (fattrflag != 0) { 1930 mtx_unlock(&fdnp->n_mtx); 1931 (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1); 1932 } else { 1933 fdnp->n_attrstamp = 0; 1934 mtx_unlock(&fdnp->n_mtx); 1935 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp); 1936 } 1937 mtx_lock(&tdnp->n_mtx); 1938 tdnp->n_flag |= NMODIFIED; 1939 if (tattrflag != 0) { 1940 mtx_unlock(&tdnp->n_mtx); 1941 (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1); 1942 } else { 1943 tdnp->n_attrstamp = 0; 1944 mtx_unlock(&tdnp->n_mtx); 1945 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp); 1946 } 1947 if (error && NFS_ISV4(fdvp)) 1948 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 1949 return (error); 1950} 1951 1952/* 1953 * nfs hard link create call 1954 */ 1955static int 1956nfs_link(struct vop_link_args *ap) 1957{ 1958 struct vnode *vp = ap->a_vp; 1959 struct vnode *tdvp = ap->a_tdvp; 1960 struct componentname *cnp = ap->a_cnp; 1961 struct nfsnode *np, *tdnp; 1962 struct nfsvattr nfsva, dnfsva; 1963 int error = 0, attrflag, dattrflag; 1964 1965 /* 1966 * Push all writes to the server, so that the attribute cache 1967 * doesn't get "out of sync" with the server. 1968 * XXX There should be a better way! 1969 */ 1970 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread); 1971 1972 error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen, 1973 cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag, 1974 &dattrflag, NULL); 1975 tdnp = VTONFS(tdvp); 1976 mtx_lock(&tdnp->n_mtx); 1977 tdnp->n_flag |= NMODIFIED; 1978 if (dattrflag != 0) { 1979 mtx_unlock(&tdnp->n_mtx); 1980 (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1); 1981 } else { 1982 tdnp->n_attrstamp = 0; 1983 mtx_unlock(&tdnp->n_mtx); 1984 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp); 1985 } 1986 if (attrflag) 1987 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1988 else { 1989 np = VTONFS(vp); 1990 mtx_lock(&np->n_mtx); 1991 np->n_attrstamp = 0; 1992 mtx_unlock(&np->n_mtx); 1993 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 1994 } 1995 /* 1996 * If negative lookup caching is enabled, I might as well 1997 * add an entry for this node. Not necessary for correctness, 1998 * but if negative caching is enabled, then the system 1999 * must care about lookup caching hit rate, so... 
2000 */ 2001 if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 && 2002 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) { 2003 cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL); 2004 } 2005 if (error && NFS_ISV4(vp)) 2006 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0, 2007 (gid_t)0); 2008 return (error); 2009} 2010 2011/* 2012 * nfs symbolic link create call 2013 */ 2014static int 2015nfs_symlink(struct vop_symlink_args *ap) 2016{ 2017 struct vnode *dvp = ap->a_dvp; 2018 struct vattr *vap = ap->a_vap; 2019 struct componentname *cnp = ap->a_cnp; 2020 struct nfsvattr nfsva, dnfsva; 2021 struct nfsfh *nfhp; 2022 struct nfsnode *np = NULL, *dnp; 2023 struct vnode *newvp = NULL; 2024 int error = 0, attrflag, dattrflag, ret; 2025 2026 vap->va_type = VLNK; 2027 error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2028 ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, 2029 &nfsva, &nfhp, &attrflag, &dattrflag, NULL); 2030 if (nfhp) { 2031 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, 2032 &np, NULL, LK_EXCLUSIVE); 2033 if (!ret) 2034 newvp = NFSTOV(np); 2035 else if (!error) 2036 error = ret; 2037 } 2038 if (newvp != NULL) { 2039 if (attrflag) 2040 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 2041 0, 1); 2042 } else if (!error) { 2043 /* 2044 * If we do not have an error and we could not extract the 2045 * newvp from the response due to the request being NFSv2, we 2046 * have to do a lookup in order to obtain a newvp to return. 2047 */ 2048 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2049 cnp->cn_cred, cnp->cn_thread, &np); 2050 if (!error) 2051 newvp = NFSTOV(np); 2052 } 2053 if (error) { 2054 if (newvp) 2055 vput(newvp); 2056 if (NFS_ISV4(dvp)) 2057 error = nfscl_maperr(cnp->cn_thread, error, 2058 vap->va_uid, vap->va_gid); 2059 } else { 2060 *ap->a_vpp = newvp; 2061 } 2062 2063 dnp = VTONFS(dvp); 2064 mtx_lock(&dnp->n_mtx); 2065 dnp->n_flag |= NMODIFIED; 2066 if (dattrflag != 0) { 2067 mtx_unlock(&dnp->n_mtx); 2068 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2069 } else { 2070 dnp->n_attrstamp = 0; 2071 mtx_unlock(&dnp->n_mtx); 2072 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2073 } 2074 /* 2075 * If negative lookup caching is enabled, I might as well 2076 * add an entry for this node. Not necessary for correctness, 2077 * but if negative caching is enabled, then the system 2078 * must care about lookup caching hit rate, so... 
2079 */ 2080 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 && 2081 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) { 2082 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL); 2083 } 2084 return (error); 2085} 2086 2087/* 2088 * nfs make dir call 2089 */ 2090static int 2091nfs_mkdir(struct vop_mkdir_args *ap) 2092{ 2093 struct vnode *dvp = ap->a_dvp; 2094 struct vattr *vap = ap->a_vap; 2095 struct componentname *cnp = ap->a_cnp; 2096 struct nfsnode *np = NULL, *dnp; 2097 struct vnode *newvp = NULL; 2098 struct vattr vattr; 2099 struct nfsfh *nfhp; 2100 struct nfsvattr nfsva, dnfsva; 2101 int error = 0, attrflag, dattrflag, ret; 2102 2103 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0) 2104 return (error); 2105 vap->va_type = VDIR; 2106 error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2107 vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp, 2108 &attrflag, &dattrflag, NULL); 2109 dnp = VTONFS(dvp); 2110 mtx_lock(&dnp->n_mtx); 2111 dnp->n_flag |= NMODIFIED; 2112 if (dattrflag != 0) { 2113 mtx_unlock(&dnp->n_mtx); 2114 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2115 } else { 2116 dnp->n_attrstamp = 0; 2117 mtx_unlock(&dnp->n_mtx); 2118 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2119 } 2120 if (nfhp) { 2121 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, 2122 &np, NULL, LK_EXCLUSIVE); 2123 if (!ret) { 2124 newvp = NFSTOV(np); 2125 if (attrflag) 2126 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 2127 NULL, 0, 1); 2128 } else if (!error) 2129 error = ret; 2130 } 2131 if (!error && newvp == NULL) { 2132 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2133 cnp->cn_cred, cnp->cn_thread, &np); 2134 if (!error) { 2135 newvp = NFSTOV(np); 2136 if (newvp->v_type != VDIR) 2137 error = EEXIST; 2138 } 2139 } 2140 if (error) { 2141 if (newvp) 2142 vput(newvp); 2143 if (NFS_ISV4(dvp)) 2144 error = nfscl_maperr(cnp->cn_thread, error, 2145 vap->va_uid, vap->va_gid); 2146 } else { 2147 /* 2148 * If negative lookup caching is enabled, I might as well 2149 * add an entry for this node. Not necessary for correctness, 2150 * but if negative caching is enabled, then the system 2151 * must care about lookup caching hit rate, so... 
2152 */ 2153 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 && 2154 (cnp->cn_flags & MAKEENTRY) && 2155 attrflag != 0 && dattrflag != 0) 2156 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, 2157 &dnfsva.na_ctime); 2158 *ap->a_vpp = newvp; 2159 } 2160 return (error); 2161} 2162 2163/* 2164 * nfs remove directory call 2165 */ 2166static int 2167nfs_rmdir(struct vop_rmdir_args *ap) 2168{ 2169 struct vnode *vp = ap->a_vp; 2170 struct vnode *dvp = ap->a_dvp; 2171 struct componentname *cnp = ap->a_cnp; 2172 struct nfsnode *dnp; 2173 struct nfsvattr dnfsva; 2174 int error, dattrflag; 2175 2176 if (dvp == vp) 2177 return (EINVAL); 2178 error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2179 cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL); 2180 dnp = VTONFS(dvp); 2181 mtx_lock(&dnp->n_mtx); 2182 dnp->n_flag |= NMODIFIED; 2183 if (dattrflag != 0) { 2184 mtx_unlock(&dnp->n_mtx); 2185 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2186 } else { 2187 dnp->n_attrstamp = 0; 2188 mtx_unlock(&dnp->n_mtx); 2189 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2190 } 2191 2192 cache_purge(dvp); 2193 cache_purge(vp); 2194 if (error && NFS_ISV4(dvp)) 2195 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0, 2196 (gid_t)0); 2197 /* 2198 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2199 */ 2200 if (error == ENOENT) 2201 error = 0; 2202 return (error); 2203} 2204 2205/* 2206 * nfs readdir call 2207 */ 2208static int 2209nfs_readdir(struct vop_readdir_args *ap) 2210{ 2211 struct vnode *vp = ap->a_vp; 2212 struct nfsnode *np = VTONFS(vp); 2213 struct uio *uio = ap->a_uio; 2214 ssize_t tresid; 2215 int error = 0; 2216 struct vattr vattr; 2217 2218 if (ap->a_eofflag != NULL) 2219 *ap->a_eofflag = 0; 2220 if (vp->v_type != VDIR) 2221 return(EPERM); 2222 2223 /* 2224 * First, check for hit on the EOF offset cache 2225 */ 2226 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset && 2227 (np->n_flag & NMODIFIED) == 0) { 2228 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) { 2229 mtx_lock(&np->n_mtx); 2230 if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) || 2231 !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) { 2232 mtx_unlock(&np->n_mtx); 2233 NFSINCRGLOBAL(newnfsstats.direofcache_hits); 2234 if (ap->a_eofflag != NULL) 2235 *ap->a_eofflag = 1; 2236 return (0); 2237 } else 2238 mtx_unlock(&np->n_mtx); 2239 } 2240 } 2241 2242 /* 2243 * Call ncl_bioread() to do the real work. 2244 */ 2245 tresid = uio->uio_resid; 2246 error = ncl_bioread(vp, uio, 0, ap->a_cred); 2247 2248 if (!error && uio->uio_resid == tresid) { 2249 NFSINCRGLOBAL(newnfsstats.direofcache_misses); 2250 if (ap->a_eofflag != NULL) 2251 *ap->a_eofflag = 1; 2252 } 2253 return (error); 2254} 2255 2256/* 2257 * Readdir rpc call. 2258 * Called from below the buffer cache by ncl_doio(). 2259 */ 2260int 2261ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2262 struct thread *td) 2263{ 2264 struct nfsvattr nfsva; 2265 nfsuint64 *cookiep, cookie; 2266 struct nfsnode *dnp = VTONFS(vp); 2267 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2268 int error = 0, eof, attrflag; 2269 2270 KASSERT(uiop->uio_iovcnt == 1 && 2271 (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 && 2272 (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0, 2273 ("nfs readdirrpc bad uio")); 2274 2275 /* 2276 * If there is no cookie, assume directory was stale. 
2277 */ 2278 ncl_dircookie_lock(dnp); 2279 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0); 2280 if (cookiep) { 2281 cookie = *cookiep; 2282 ncl_dircookie_unlock(dnp); 2283 } else { 2284 ncl_dircookie_unlock(dnp); 2285 return (NFSERR_BAD_COOKIE); 2286 } 2287 2288 if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp)) 2289 (void)ncl_fsinfo(nmp, vp, cred, td); 2290 2291 error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva, 2292 &attrflag, &eof, NULL); 2293 if (attrflag) 2294 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 2295 2296 if (!error) { 2297 /* 2298 * We are now either at the end of the directory or have filled 2299 * the block. 2300 */ 2301 if (eof) 2302 dnp->n_direofoffset = uiop->uio_offset; 2303 else { 2304 if (uiop->uio_resid > 0) 2305 ncl_printf("EEK! readdirrpc resid > 0\n"); 2306 ncl_dircookie_lock(dnp); 2307 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1); 2308 *cookiep = cookie; 2309 ncl_dircookie_unlock(dnp); 2310 } 2311 } else if (NFS_ISV4(vp)) { 2312 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 2313 } 2314 return (error); 2315} 2316 2317/* 2318 * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc(). 2319 */ 2320int 2321ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2322 struct thread *td) 2323{ 2324 struct nfsvattr nfsva; 2325 nfsuint64 *cookiep, cookie; 2326 struct nfsnode *dnp = VTONFS(vp); 2327 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2328 int error = 0, attrflag, eof; 2329 2330 KASSERT(uiop->uio_iovcnt == 1 && 2331 (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 && 2332 (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0, 2333 ("nfs readdirplusrpc bad uio")); 2334 2335 /* 2336 * If there is no cookie, assume directory was stale. 2337 */ 2338 ncl_dircookie_lock(dnp); 2339 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0); 2340 if (cookiep) { 2341 cookie = *cookiep; 2342 ncl_dircookie_unlock(dnp); 2343 } else { 2344 ncl_dircookie_unlock(dnp); 2345 return (NFSERR_BAD_COOKIE); 2346 } 2347 2348 if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp)) 2349 (void)ncl_fsinfo(nmp, vp, cred, td); 2350 error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva, 2351 &attrflag, &eof, NULL); 2352 if (attrflag) 2353 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 2354 2355 if (!error) { 2356 /* 2357 * We are now either at end of the directory or have filled the 2358 * the block. 2359 */ 2360 if (eof) 2361 dnp->n_direofoffset = uiop->uio_offset; 2362 else { 2363 if (uiop->uio_resid > 0) 2364 ncl_printf("EEK! readdirplusrpc resid > 0\n"); 2365 ncl_dircookie_lock(dnp); 2366 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1); 2367 *cookiep = cookie; 2368 ncl_dircookie_unlock(dnp); 2369 } 2370 } else if (NFS_ISV4(vp)) { 2371 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 2372 } 2373 return (error); 2374} 2375 2376/* 2377 * Silly rename. To make the NFS filesystem that is stateless look a little 2378 * more like the "ufs" a remove of an active vnode is translated to a rename 2379 * to a funny looking filename that is removed by nfs_inactive on the 2380 * nfsnode. There is the potential for another process on a different client 2381 * to create the same funny name between the nfs_lookitup() fails and the 2382 * nfs_rename() completes, but... 
2383 */ 2384static int 2385nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 2386{ 2387 struct sillyrename *sp; 2388 struct nfsnode *np; 2389 int error; 2390 short pid; 2391 unsigned int lticks; 2392 2393 cache_purge(dvp); 2394 np = VTONFS(vp); 2395 KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir")); 2396 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename), 2397 M_NEWNFSREQ, M_WAITOK); 2398 sp->s_cred = crhold(cnp->cn_cred); 2399 sp->s_dvp = dvp; 2400 VREF(dvp); 2401 2402 /* 2403 * Fudge together a funny name. 2404 * Changing the format of the funny name to accomodate more 2405 * sillynames per directory. 2406 * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is 2407 * CPU ticks since boot. 2408 */ 2409 pid = cnp->cn_thread->td_proc->p_pid; 2410 lticks = (unsigned int)ticks; 2411 for ( ; ; ) { 2412 sp->s_namlen = sprintf(sp->s_name, 2413 ".nfs.%08x.%04x4.4", lticks, 2414 pid); 2415 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2416 cnp->cn_thread, NULL)) 2417 break; 2418 lticks++; 2419 } 2420 error = nfs_renameit(dvp, vp, cnp, sp); 2421 if (error) 2422 goto bad; 2423 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2424 cnp->cn_thread, &np); 2425 np->n_sillyrename = sp; 2426 return (0); 2427bad: 2428 vrele(sp->s_dvp); 2429 crfree(sp->s_cred); 2430 free((caddr_t)sp, M_NEWNFSREQ); 2431 return (error); 2432} 2433 2434/* 2435 * Look up a file name and optionally either update the file handle or 2436 * allocate an nfsnode, depending on the value of npp. 2437 * npp == NULL --> just do the lookup 2438 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2439 * handled too 2440 * *npp != NULL --> update the file handle in the vnode 2441 */ 2442static int 2443nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred, 2444 struct thread *td, struct nfsnode **npp) 2445{ 2446 struct vnode *newvp = NULL, *vp; 2447 struct nfsnode *np, *dnp = VTONFS(dvp); 2448 struct nfsfh *nfhp, *onfhp; 2449 struct nfsvattr nfsva, dnfsva; 2450 struct componentname cn; 2451 int error = 0, attrflag, dattrflag; 2452 u_int hash; 2453 2454 error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva, 2455 &nfhp, &attrflag, &dattrflag, NULL); 2456 if (dattrflag) 2457 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2458 if (npp && !error) { 2459 if (*npp != NULL) { 2460 np = *npp; 2461 vp = NFSTOV(np); 2462 /* 2463 * For NFSv4, check to see if it is the same name and 2464 * replace the name, if it is different. 2465 */ 2466 if (np->n_v4 != NULL && nfsva.na_type == VREG && 2467 (np->n_v4->n4_namelen != len || 2468 NFSBCMP(name, NFS4NODENAME(np->n_v4), len) || 2469 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || 2470 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, 2471 dnp->n_fhp->nfh_len))) { 2472#ifdef notdef 2473{ char nnn[100]; int nnnl; 2474nnnl = (len < 100) ? 
len : 99; 2475bcopy(name, nnn, nnnl); 2476nnn[nnnl] = '\0'; 2477printf("replace=%s\n",nnn); 2478} 2479#endif 2480 FREE((caddr_t)np->n_v4, M_NFSV4NODE); 2481 MALLOC(np->n_v4, struct nfsv4node *, 2482 sizeof (struct nfsv4node) + 2483 dnp->n_fhp->nfh_len + len - 1, 2484 M_NFSV4NODE, M_WAITOK); 2485 np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len; 2486 np->n_v4->n4_namelen = len; 2487 NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, 2488 dnp->n_fhp->nfh_len); 2489 NFSBCOPY(name, NFS4NODENAME(np->n_v4), len); 2490 } 2491 hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, 2492 FNV1_32_INIT); 2493 onfhp = np->n_fhp; 2494 /* 2495 * Rehash node for new file handle. 2496 */ 2497 vfs_hash_rehash(vp, hash); 2498 np->n_fhp = nfhp; 2499 if (onfhp != NULL) 2500 FREE((caddr_t)onfhp, M_NFSFH); 2501 newvp = NFSTOV(np); 2502 } else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) { 2503 FREE((caddr_t)nfhp, M_NFSFH); 2504 VREF(dvp); 2505 newvp = dvp; 2506 } else { 2507 cn.cn_nameptr = name; 2508 cn.cn_namelen = len; 2509 error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td, 2510 &np, NULL, LK_EXCLUSIVE); 2511 if (error) 2512 return (error); 2513 newvp = NFSTOV(np); 2514 } 2515 if (!attrflag && *npp == NULL) { 2516 if (newvp == dvp) 2517 vrele(newvp); 2518 else 2519 vput(newvp); 2520 return (ENOENT); 2521 } 2522 if (attrflag) 2523 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 2524 0, 1); 2525 } 2526 if (npp && *npp == NULL) { 2527 if (error) { 2528 if (newvp) { 2529 if (newvp == dvp) 2530 vrele(newvp); 2531 else 2532 vput(newvp); 2533 } 2534 } else 2535 *npp = np; 2536 } 2537 if (error && NFS_ISV4(dvp)) 2538 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 2539 return (error); 2540} 2541 2542/* 2543 * Nfs Version 3 and 4 commit rpc 2544 */ 2545int 2546ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred, 2547 struct thread *td) 2548{ 2549 struct nfsvattr nfsva; 2550 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2551 int error, attrflag; 2552 2553 mtx_lock(&nmp->nm_mtx); 2554 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) { 2555 mtx_unlock(&nmp->nm_mtx); 2556 return (0); 2557 } 2558 mtx_unlock(&nmp->nm_mtx); 2559 error = nfsrpc_commit(vp, offset, cnt, cred, td, &nfsva, 2560 &attrflag, NULL); 2561 if (attrflag != 0) 2562 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 2563 0, 1); 2564 if (error != 0 && NFS_ISV4(vp)) 2565 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 2566 return (error); 2567} 2568 2569/* 2570 * Strategy routine. 2571 * For async requests when nfsiod(s) are running, queue the request by 2572 * calling ncl_asyncio(), otherwise just all ncl_doio() to do the 2573 * request. 2574 */ 2575static int 2576nfs_strategy(struct vop_strategy_args *ap) 2577{ 2578 struct buf *bp = ap->a_bp; 2579 struct ucred *cr; 2580 2581 KASSERT(!(bp->b_flags & B_DONE), 2582 ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp)); 2583 BUF_ASSERT_HELD(bp); 2584 2585 if (bp->b_iocmd == BIO_READ) 2586 cr = bp->b_rcred; 2587 else 2588 cr = bp->b_wcred; 2589 2590 /* 2591 * If the op is asynchronous and an i/o daemon is waiting 2592 * queue the request, wake it up and wait for completion 2593 * otherwise just do it ourselves. 2594 */ 2595 if ((bp->b_flags & B_ASYNC) == 0 || 2596 ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread)) 2597 (void) ncl_doio(ap->a_vp, bp, cr, curthread, 1); 2598 return (0); 2599} 2600 2601/* 2602 * fsync vnode op. Just call ncl_flush() with commit == 1. 
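 * Passing commit == 1 makes ncl_flush() also issue Commit RPCs for
 * buffers marked B_NEEDCOMMIT (data already written to the server but
 * not yet committed to stable storage), in addition to pushing out any
 * dirty buffers.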
2603 */ 2604/* ARGSUSED */ 2605static int 2606nfs_fsync(struct vop_fsync_args *ap) 2607{ 2608 2609 if (ap->a_vp->v_type != VREG) { 2610 /* 2611 * For NFS, metadata is changed synchronously on the server, 2612 * so there is nothing to flush. Also, ncl_flush() clears 2613 * the NMODIFIED flag and that shouldn't be done here for 2614 * directories. 2615 */ 2616 return (0); 2617 } 2618 return (ncl_flush(ap->a_vp, ap->a_waitfor, NULL, ap->a_td, 1, 0)); 2619} 2620 2621/* 2622 * Flush all the blocks associated with a vnode. 2623 * Walk through the buffer pool and push any dirty pages 2624 * associated with the vnode. 2625 * If the called_from_renewthread argument is TRUE, it has been called 2626 * from the NFSv4 renew thread and, as such, cannot block indefinitely 2627 * waiting for a buffer write to complete. 2628 */ 2629int 2630ncl_flush(struct vnode *vp, int waitfor, struct ucred *cred, struct thread *td, 2631 int commit, int called_from_renewthread) 2632{ 2633 struct nfsnode *np = VTONFS(vp); 2634 struct buf *bp; 2635 int i; 2636 struct buf *nbp; 2637 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2638 int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos; 2639 int passone = 1, trycnt = 0; 2640 u_quad_t off, endoff, toff; 2641 struct ucred* wcred = NULL; 2642 struct buf **bvec = NULL; 2643 struct bufobj *bo; 2644#ifndef NFS_COMMITBVECSIZ 2645#define NFS_COMMITBVECSIZ 20 2646#endif 2647 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ]; 2648 int bvecsize = 0, bveccount; 2649 2650 if (called_from_renewthread != 0) 2651 slptimeo = hz; 2652 if (nmp->nm_flag & NFSMNT_INT) 2653 slpflag = PCATCH; 2654 if (!commit) 2655 passone = 0; 2656 bo = &vp->v_bufobj; 2657 /* 2658 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the 2659 * server, but has not been committed to stable storage on the server 2660 * yet. On the first pass, the byte range is worked out and the commit 2661 * rpc is done. On the second pass, ncl_writebp() is called to do the 2662 * job. 2663 */ 2664again: 2665 off = (u_quad_t)-1; 2666 endoff = 0; 2667 bvecpos = 0; 2668 if (NFS_ISV34(vp) && commit) { 2669 if (bvec != NULL && bvec != bvec_on_stack) 2670 free(bvec, M_TEMP); 2671 /* 2672 * Count up how many buffers waiting for a commit. 2673 */ 2674 bveccount = 0; 2675 BO_LOCK(bo); 2676 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2677 if (!BUF_ISLOCKED(bp) && 2678 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) 2679 == (B_DELWRI | B_NEEDCOMMIT)) 2680 bveccount++; 2681 } 2682 /* 2683 * Allocate space to remember the list of bufs to commit. It is 2684 * important to use M_NOWAIT here to avoid a race with nfs_write. 2685 * If we can't get memory (for whatever reason), we will end up 2686 * committing the buffers one-by-one in the loop below. 2687 */ 2688 if (bveccount > NFS_COMMITBVECSIZ) { 2689 /* 2690 * Release the vnode interlock to avoid a lock 2691 * order reversal. 
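 * (In the current code this is the bufobj lock, dropped and retaken
 * with BO_UNLOCK()/BO_LOCK(), rather than the vnode interlock.)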
2692 */ 2693 BO_UNLOCK(bo); 2694 bvec = (struct buf **) 2695 malloc(bveccount * sizeof(struct buf *), 2696 M_TEMP, M_NOWAIT); 2697 BO_LOCK(bo); 2698 if (bvec == NULL) { 2699 bvec = bvec_on_stack; 2700 bvecsize = NFS_COMMITBVECSIZ; 2701 } else 2702 bvecsize = bveccount; 2703 } else { 2704 bvec = bvec_on_stack; 2705 bvecsize = NFS_COMMITBVECSIZ; 2706 } 2707 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2708 if (bvecpos >= bvecsize) 2709 break; 2710 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { 2711 nbp = TAILQ_NEXT(bp, b_bobufs); 2712 continue; 2713 } 2714 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) != 2715 (B_DELWRI | B_NEEDCOMMIT)) { 2716 BUF_UNLOCK(bp); 2717 nbp = TAILQ_NEXT(bp, b_bobufs); 2718 continue; 2719 } 2720 BO_UNLOCK(bo); 2721 bremfree(bp); 2722 /* 2723 * Work out if all buffers are using the same cred 2724 * so we can deal with them all with one commit. 2725 * 2726 * NOTE: we are not clearing B_DONE here, so we have 2727 * to do it later on in this routine if we intend to 2728 * initiate I/O on the bp. 2729 * 2730 * Note: to avoid loopback deadlocks, we do not 2731 * assign b_runningbufspace. 2732 */ 2733 if (wcred == NULL) 2734 wcred = bp->b_wcred; 2735 else if (wcred != bp->b_wcred) 2736 wcred = NOCRED; 2737 vfs_busy_pages(bp, 1); 2738 2739 BO_LOCK(bo); 2740 /* 2741 * bp is protected by being locked, but nbp is not 2742 * and vfs_busy_pages() may sleep. We have to 2743 * recalculate nbp. 2744 */ 2745 nbp = TAILQ_NEXT(bp, b_bobufs); 2746 2747 /* 2748 * A list of these buffers is kept so that the 2749 * second loop knows which buffers have actually 2750 * been committed. This is necessary, since there 2751 * may be a race between the commit rpc and new 2752 * uncommitted writes on the file. 2753 */ 2754 bvec[bvecpos++] = bp; 2755 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2756 bp->b_dirtyoff; 2757 if (toff < off) 2758 off = toff; 2759 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); 2760 if (toff > endoff) 2761 endoff = toff; 2762 } 2763 BO_UNLOCK(bo); 2764 } 2765 if (bvecpos > 0) { 2766 /* 2767 * Commit data on the server, as required. 2768 * If all bufs are using the same wcred, then use that with 2769 * one call for all of them, otherwise commit each one 2770 * separately. 2771 */ 2772 if (wcred != NOCRED) 2773 retv = ncl_commit(vp, off, (int)(endoff - off), 2774 wcred, td); 2775 else { 2776 retv = 0; 2777 for (i = 0; i < bvecpos; i++) { 2778 off_t off, size; 2779 bp = bvec[i]; 2780 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2781 bp->b_dirtyoff; 2782 size = (u_quad_t)(bp->b_dirtyend 2783 - bp->b_dirtyoff); 2784 retv = ncl_commit(vp, off, (int)size, 2785 bp->b_wcred, td); 2786 if (retv) break; 2787 } 2788 } 2789 2790 if (retv == NFSERR_STALEWRITEVERF) 2791 ncl_clearcommit(vp->v_mount); 2792 2793 /* 2794 * Now, either mark the blocks I/O done or mark the 2795 * blocks dirty, depending on whether the commit 2796 * succeeded. 2797 */ 2798 for (i = 0; i < bvecpos; i++) { 2799 bp = bvec[i]; 2800 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 2801 if (retv) { 2802 /* 2803 * Error, leave B_DELWRI intact 2804 */ 2805 vfs_unbusy_pages(bp); 2806 brelse(bp); 2807 } else { 2808 /* 2809 * Success, remove B_DELWRI ( bundirty() ). 2810 * 2811 * b_dirtyoff/b_dirtyend seem to be NFS 2812 * specific. We should probably move that 2813 * into bundirty(). 
XXX 2814 */ 2815 bufobj_wref(bo); 2816 bp->b_flags |= B_ASYNC; 2817 bundirty(bp); 2818 bp->b_flags &= ~B_DONE; 2819 bp->b_ioflags &= ~BIO_ERROR; 2820 bp->b_dirtyoff = bp->b_dirtyend = 0; 2821 bufdone(bp); 2822 } 2823 } 2824 } 2825 2826 /* 2827 * Start/do any write(s) that are required. 2828 */ 2829loop: 2830 BO_LOCK(bo); 2831 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2832 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { 2833 if (waitfor != MNT_WAIT || passone) 2834 continue; 2835 2836 error = BUF_TIMELOCK(bp, 2837 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2838 BO_LOCKPTR(bo), "nfsfsync", slpflag, slptimeo); 2839 if (error == 0) { 2840 BUF_UNLOCK(bp); 2841 goto loop; 2842 } 2843 if (error == ENOLCK) { 2844 error = 0; 2845 goto loop; 2846 } 2847 if (called_from_renewthread != 0) { 2848 /* 2849 * Return EIO so the flush will be retried 2850 * later. 2851 */ 2852 error = EIO; 2853 goto done; 2854 } 2855 if (newnfs_sigintr(nmp, td)) { 2856 error = EINTR; 2857 goto done; 2858 } 2859 if (slpflag == PCATCH) { 2860 slpflag = 0; 2861 slptimeo = 2 * hz; 2862 } 2863 goto loop; 2864 } 2865 if ((bp->b_flags & B_DELWRI) == 0) 2866 panic("nfs_fsync: not dirty"); 2867 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) { 2868 BUF_UNLOCK(bp); 2869 continue; 2870 } 2871 BO_UNLOCK(bo); 2872 bremfree(bp); 2873 if (passone || !commit) 2874 bp->b_flags |= B_ASYNC; 2875 else 2876 bp->b_flags |= B_ASYNC; 2877 bwrite(bp); 2878 if (newnfs_sigintr(nmp, td)) { 2879 error = EINTR; 2880 goto done; 2881 } 2882 goto loop; 2883 } 2884 if (passone) { 2885 passone = 0; 2886 BO_UNLOCK(bo); 2887 goto again; 2888 } 2889 if (waitfor == MNT_WAIT) { 2890 while (bo->bo_numoutput) { 2891 error = bufobj_wwait(bo, slpflag, slptimeo); 2892 if (error) { 2893 BO_UNLOCK(bo); 2894 if (called_from_renewthread != 0) { 2895 /* 2896 * Return EIO so that the flush will be 2897 * retried later. 2898 */ 2899 error = EIO; 2900 goto done; 2901 } 2902 error = newnfs_sigintr(nmp, td); 2903 if (error) 2904 goto done; 2905 if (slpflag == PCATCH) { 2906 slpflag = 0; 2907 slptimeo = 2 * hz; 2908 } 2909 BO_LOCK(bo); 2910 } 2911 } 2912 if (bo->bo_dirty.bv_cnt != 0 && commit) { 2913 BO_UNLOCK(bo); 2914 goto loop; 2915 } 2916 /* 2917 * Wait for all the async IO requests to drain 2918 */ 2919 BO_UNLOCK(bo); 2920 mtx_lock(&np->n_mtx); 2921 while (np->n_directio_asyncwr > 0) { 2922 np->n_flag |= NFSYNCWAIT; 2923 error = newnfs_msleep(td, &np->n_directio_asyncwr, 2924 &np->n_mtx, slpflag | (PRIBIO + 1), 2925 "nfsfsync", 0); 2926 if (error) { 2927 if (newnfs_sigintr(nmp, td)) { 2928 mtx_unlock(&np->n_mtx); 2929 error = EINTR; 2930 goto done; 2931 } 2932 } 2933 } 2934 mtx_unlock(&np->n_mtx); 2935 } else 2936 BO_UNLOCK(bo); 2937 if (NFSHASPNFS(nmp)) { 2938 nfscl_layoutcommit(vp, td); 2939 /* 2940 * Invalidate the attribute cache, since writes to a DS 2941 * won't update the size attribute. 2942 */ 2943 mtx_lock(&np->n_mtx); 2944 np->n_attrstamp = 0; 2945 } else 2946 mtx_lock(&np->n_mtx); 2947 if (np->n_flag & NWRITEERR) { 2948 error = np->n_error; 2949 np->n_flag &= ~NWRITEERR; 2950 } 2951 if (commit && bo->bo_dirty.bv_cnt == 0 && 2952 bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0) 2953 np->n_flag &= ~NMODIFIED; 2954 mtx_unlock(&np->n_mtx); 2955done: 2956 if (bvec != NULL && bvec != bvec_on_stack) 2957 free(bvec, M_TEMP); 2958 if (error == 0 && commit != 0 && waitfor == MNT_WAIT && 2959 (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 || 2960 np->n_directio_asyncwr != 0) && trycnt++ < 5) { 2961 /* try, try again... 
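 * A bounded number of extra passes (trycnt < 5) is made when dirty or
 * in-flight buffers remain after a synchronous, committed flush,
 * presumably because new writes may have arrived while the commit was
 * in progress.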
*/ 2962 passone = 1; 2963 wcred = NULL; 2964 bvec = NULL; 2965 bvecsize = 0; 2966printf("try%d\n", trycnt); 2967 goto again; 2968 } 2969 return (error); 2970} 2971 2972/* 2973 * NFS advisory byte-level locks. 2974 */ 2975static int 2976nfs_advlock(struct vop_advlock_args *ap) 2977{ 2978 struct vnode *vp = ap->a_vp; 2979 struct ucred *cred; 2980 struct nfsnode *np = VTONFS(ap->a_vp); 2981 struct proc *p = (struct proc *)ap->a_id; 2982 struct thread *td = curthread; /* XXX */ 2983 struct vattr va; 2984 int ret, error = EOPNOTSUPP; 2985 u_quad_t size; 2986 2987 if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) { 2988 if (vp->v_type != VREG) 2989 return (EINVAL); 2990 if ((ap->a_flags & F_POSIX) != 0) 2991 cred = p->p_ucred; 2992 else 2993 cred = td->td_ucred; 2994 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 2995 if (vp->v_iflag & VI_DOOMED) { 2996 NFSVOPUNLOCK(vp, 0); 2997 return (EBADF); 2998 } 2999 3000 /* 3001 * If this is unlocking a write locked region, flush and 3002 * commit them before unlocking. This is required by 3003 * RFC3530 Sec. 9.3.2. 3004 */ 3005 if (ap->a_op == F_UNLCK && 3006 nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id, 3007 ap->a_flags)) 3008 (void) ncl_flush(vp, MNT_WAIT, cred, td, 1, 0); 3009 3010 /* 3011 * Loop around doing the lock op, while a blocking lock 3012 * must wait for the lock op to succeed. 3013 */ 3014 do { 3015 ret = nfsrpc_advlock(vp, np->n_size, ap->a_op, 3016 ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags); 3017 if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && 3018 ap->a_op == F_SETLK) { 3019 NFSVOPUNLOCK(vp, 0); 3020 error = nfs_catnap(PZERO | PCATCH, ret, 3021 "ncladvl"); 3022 if (error) 3023 return (EINTR); 3024 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 3025 if (vp->v_iflag & VI_DOOMED) { 3026 NFSVOPUNLOCK(vp, 0); 3027 return (EBADF); 3028 } 3029 } 3030 } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && 3031 ap->a_op == F_SETLK); 3032 if (ret == NFSERR_DENIED) { 3033 NFSVOPUNLOCK(vp, 0); 3034 return (EAGAIN); 3035 } else if (ret == EINVAL || ret == EBADF || ret == EINTR) { 3036 NFSVOPUNLOCK(vp, 0); 3037 return (ret); 3038 } else if (ret != 0) { 3039 NFSVOPUNLOCK(vp, 0); 3040 return (EACCES); 3041 } 3042 3043 /* 3044 * Now, if we just got a lock, invalidate data in the buffer 3045 * cache, as required, so that the coherency conforms with 3046 * RFC3530 Sec. 9.3.2. 3047 */ 3048 if (ap->a_op == F_SETLK) { 3049 if ((np->n_flag & NMODIFIED) == 0) { 3050 np->n_attrstamp = 0; 3051 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 3052 ret = VOP_GETATTR(vp, &va, cred); 3053 } 3054 if ((np->n_flag & NMODIFIED) || ret || 3055 np->n_change != va.va_filerev) { 3056 (void) ncl_vinvalbuf(vp, V_SAVE, td, 1); 3057 np->n_attrstamp = 0; 3058 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 3059 ret = VOP_GETATTR(vp, &va, cred); 3060 if (!ret) { 3061 np->n_mtime = va.va_mtime; 3062 np->n_change = va.va_filerev; 3063 } 3064 } 3065 /* Mark that a file lock has been acquired. 
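 * The same NHASBEENLOCKED marking is done in the NFSv2/3 path further
 * below after a successful F_SETLK.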
*/ 3066 mtx_lock(&np->n_mtx); 3067 np->n_flag |= NHASBEENLOCKED; 3068 mtx_unlock(&np->n_mtx); 3069 } 3070 NFSVOPUNLOCK(vp, 0); 3071 return (0); 3072 } else if (!NFS_ISV4(vp)) { 3073 error = NFSVOPLOCK(vp, LK_SHARED); 3074 if (error) 3075 return (error); 3076 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { 3077 size = VTONFS(vp)->n_size; 3078 NFSVOPUNLOCK(vp, 0); 3079 error = lf_advlock(ap, &(vp->v_lockf), size); 3080 } else { 3081 if (nfs_advlock_p != NULL) 3082 error = nfs_advlock_p(ap); 3083 else { 3084 NFSVOPUNLOCK(vp, 0); 3085 error = ENOLCK; 3086 } 3087 } 3088 if (error == 0 && ap->a_op == F_SETLK) { 3089 /* Mark that a file lock has been acquired. */ 3090 mtx_lock(&np->n_mtx); 3091 np->n_flag |= NHASBEENLOCKED; 3092 mtx_unlock(&np->n_mtx); 3093 } 3094 } 3095 return (error); 3096} 3097 3098/* 3099 * NFS advisory byte-level locks. 3100 */ 3101static int 3102nfs_advlockasync(struct vop_advlockasync_args *ap) 3103{ 3104 struct vnode *vp = ap->a_vp; 3105 u_quad_t size; 3106 int error; 3107 3108 if (NFS_ISV4(vp)) 3109 return (EOPNOTSUPP); 3110 error = NFSVOPLOCK(vp, LK_SHARED); 3111 if (error) 3112 return (error); 3113 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { 3114 size = VTONFS(vp)->n_size; 3115 NFSVOPUNLOCK(vp, 0); 3116 error = lf_advlockasync(ap, &(vp->v_lockf), size); 3117 } else { 3118 NFSVOPUNLOCK(vp, 0); 3119 error = EOPNOTSUPP; 3120 } 3121 return (error); 3122} 3123 3124/* 3125 * Print out the contents of an nfsnode. 3126 */ 3127static int 3128nfs_print(struct vop_print_args *ap) 3129{ 3130 struct vnode *vp = ap->a_vp; 3131 struct nfsnode *np = VTONFS(vp); 3132 3133 ncl_printf("\tfileid %ld fsid 0x%x", 3134 np->n_vattr.na_fileid, np->n_vattr.na_fsid); 3135 if (vp->v_type == VFIFO) 3136 fifo_printinfo(vp); 3137 printf("\n"); 3138 return (0); 3139} 3140 3141/* 3142 * This is the "real" nfs::bwrite(struct buf*). 3143 * We set B_CACHE if this is a VMIO buffer. 3144 */ 3145int 3146ncl_writebp(struct buf *bp, int force __unused, struct thread *td) 3147{ 3148 int s; 3149 int oldflags = bp->b_flags; 3150#if 0 3151 int retv = 1; 3152 off_t off; 3153#endif 3154 3155 BUF_ASSERT_HELD(bp); 3156 3157 if (bp->b_flags & B_INVAL) { 3158 brelse(bp); 3159 return(0); 3160 } 3161 3162 bp->b_flags |= B_CACHE; 3163 3164 /* 3165 * Undirty the bp. We will redirty it later if the I/O fails. 3166 */ 3167 3168 s = splbio(); 3169 bundirty(bp); 3170 bp->b_flags &= ~B_DONE; 3171 bp->b_ioflags &= ~BIO_ERROR; 3172 bp->b_iocmd = BIO_WRITE; 3173 3174 bufobj_wref(bp->b_bufobj); 3175 curthread->td_ru.ru_oublock++; 3176 splx(s); 3177 3178 /* 3179 * Note: to avoid loopback deadlocks, we do not 3180 * assign b_runningbufspace. 3181 */ 3182 vfs_busy_pages(bp, 1); 3183 3184 BUF_KERNPROC(bp); 3185 bp->b_iooffset = dbtob(bp->b_blkno); 3186 bstrategy(bp); 3187 3188 if( (oldflags & B_ASYNC) == 0) { 3189 int rtval = bufwait(bp); 3190 3191 if (oldflags & B_DELWRI) { 3192 s = splbio(); 3193 reassignbuf(bp); 3194 splx(s); 3195 } 3196 brelse(bp); 3197 return (rtval); 3198 } 3199 3200 return (0); 3201} 3202 3203/* 3204 * nfs special file access vnode op. 3205 * Essentially just get vattr and then imitate iaccess() since the device is 3206 * local to the client. 
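 * The access check itself is done locally with vaccess(), using the
 * attributes fetched from the server via VOP_GETATTR().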
3207 */ 3208static int 3209nfsspec_access(struct vop_access_args *ap) 3210{ 3211 struct vattr *vap; 3212 struct ucred *cred = ap->a_cred; 3213 struct vnode *vp = ap->a_vp; 3214 accmode_t accmode = ap->a_accmode; 3215 struct vattr vattr; 3216 int error; 3217 3218 /* 3219 * Disallow write attempts on filesystems mounted read-only; 3220 * unless the file is a socket, fifo, or a block or character 3221 * device resident on the filesystem. 3222 */ 3223 if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3224 switch (vp->v_type) { 3225 case VREG: 3226 case VDIR: 3227 case VLNK: 3228 return (EROFS); 3229 default: 3230 break; 3231 } 3232 } 3233 vap = &vattr; 3234 error = VOP_GETATTR(vp, vap, cred); 3235 if (error) 3236 goto out; 3237 error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid, 3238 accmode, cred, NULL); 3239out: 3240 return error; 3241} 3242 3243/* 3244 * Read wrapper for fifos. 3245 */ 3246static int 3247nfsfifo_read(struct vop_read_args *ap) 3248{ 3249 struct nfsnode *np = VTONFS(ap->a_vp); 3250 int error; 3251 3252 /* 3253 * Set access flag. 3254 */ 3255 mtx_lock(&np->n_mtx); 3256 np->n_flag |= NACC; 3257 vfs_timestamp(&np->n_atim); 3258 mtx_unlock(&np->n_mtx); 3259 error = fifo_specops.vop_read(ap); 3260 return error; 3261} 3262 3263/* 3264 * Write wrapper for fifos. 3265 */ 3266static int 3267nfsfifo_write(struct vop_write_args *ap) 3268{ 3269 struct nfsnode *np = VTONFS(ap->a_vp); 3270 3271 /* 3272 * Set update flag. 3273 */ 3274 mtx_lock(&np->n_mtx); 3275 np->n_flag |= NUPD; 3276 vfs_timestamp(&np->n_mtim); 3277 mtx_unlock(&np->n_mtx); 3278 return(fifo_specops.vop_write(ap)); 3279} 3280 3281/* 3282 * Close wrapper for fifos. 3283 * 3284 * Update the times on the nfsnode then do fifo close. 3285 */ 3286static int 3287nfsfifo_close(struct vop_close_args *ap) 3288{ 3289 struct vnode *vp = ap->a_vp; 3290 struct nfsnode *np = VTONFS(vp); 3291 struct vattr vattr; 3292 struct timespec ts; 3293 3294 mtx_lock(&np->n_mtx); 3295 if (np->n_flag & (NACC | NUPD)) { 3296 vfs_timestamp(&ts); 3297 if (np->n_flag & NACC) 3298 np->n_atim = ts; 3299 if (np->n_flag & NUPD) 3300 np->n_mtim = ts; 3301 np->n_flag |= NCHG; 3302 if (vrefcnt(vp) == 1 && 3303 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3304 VATTR_NULL(&vattr); 3305 if (np->n_flag & NACC) 3306 vattr.va_atime = np->n_atim; 3307 if (np->n_flag & NUPD) 3308 vattr.va_mtime = np->n_mtim; 3309 mtx_unlock(&np->n_mtx); 3310 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3311 goto out; 3312 } 3313 } 3314 mtx_unlock(&np->n_mtx); 3315out: 3316 return (fifo_specops.vop_close(ap)); 3317} 3318 3319/* 3320 * Just call ncl_writebp() with the force argument set to 1. 3321 * 3322 * NOTE: B_DONE may or may not be set in a_bp on call. 3323 */ 3324static int 3325nfs_bwrite(struct buf *bp) 3326{ 3327 3328 return (ncl_writebp(bp, 1, curthread)); 3329} 3330 3331struct buf_ops buf_ops_newnfs = { 3332 .bop_name = "buf_ops_nfs", 3333 .bop_write = nfs_bwrite, 3334 .bop_strategy = bufstrategy, 3335 .bop_sync = bufsync, 3336 .bop_bdflush = bufbdflush, 3337}; 3338 3339/* 3340 * Cloned from vop_stdlock(), and then the ugly hack added. 3341 */ 3342static int 3343nfs_lock1(struct vop_lock1_args *ap) 3344{ 3345 struct vnode *vp = ap->a_vp; 3346 int error = 0; 3347 3348 /* 3349 * Since vfs_hash_get() calls vget() and it will no longer work 3350 * for FreeBSD8 with flags == 0, I can only think of this horrible 3351 * hack to work around it. I call vfs_hash_get() with LK_EXCLOTHER 3352 * and then handle it here. 
All I want for this case is a v_usecount 3353 * on the vnode to use for recovery, while another thread might 3354 * hold a lock on the vnode. I have the other threads blocked, so 3355 * there isn't any race problem. 3356 */ 3357 if ((ap->a_flags & LK_TYPE_MASK) == LK_EXCLOTHER) { 3358 if ((ap->a_flags & LK_INTERLOCK) == 0) 3359 panic("ncllock1"); 3360 if ((vp->v_iflag & VI_DOOMED)) 3361 error = ENOENT; 3362 VI_UNLOCK(vp); 3363 return (error); 3364 } 3365 return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp), 3366 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file, 3367 ap->a_line)); 3368} 3369 3370static int 3371nfs_getacl(struct vop_getacl_args *ap) 3372{ 3373 int error; 3374 3375 if (ap->a_type != ACL_TYPE_NFS4) 3376 return (EOPNOTSUPP); 3377 error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, 3378 NULL); 3379 if (error > NFSERR_STALE) { 3380 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); 3381 error = EPERM; 3382 } 3383 return (error); 3384} 3385 3386static int 3387nfs_setacl(struct vop_setacl_args *ap) 3388{ 3389 int error; 3390 3391 if (ap->a_type != ACL_TYPE_NFS4) 3392 return (EOPNOTSUPP); 3393 error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, 3394 NULL); 3395 if (error > NFSERR_STALE) { 3396 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); 3397 error = EPERM; 3398 } 3399 return (error); 3400} 3401 3402/* 3403 * Return POSIX pathconf information applicable to nfs filesystems. 3404 */ 3405static int 3406nfs_pathconf(struct vop_pathconf_args *ap) 3407{ 3408 struct nfsv3_pathconf pc; 3409 struct nfsvattr nfsva; 3410 struct vnode *vp = ap->a_vp; 3411 struct thread *td = curthread; 3412 int attrflag, error; 3413 3414 if ((NFS_ISV34(vp) && (ap->a_name == _PC_LINK_MAX || 3415 ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED || 3416 ap->a_name == _PC_NO_TRUNC)) || 3417 (NFS_ISV4(vp) && ap->a_name == _PC_ACL_NFS4)) { 3418 /* 3419 * Since only the above 4 a_names are returned by the NFSv3 3420 * Pathconf RPC, there is no point in doing it for others. 3421 * For NFSv4, the Pathconf RPC (actually a Getattr Op.) can 3422 * be used for _PC_NFS4_ACL as well. 3423 */ 3424 error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva, 3425 &attrflag, NULL); 3426 if (attrflag != 0) 3427 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 3428 1); 3429 if (error != 0) 3430 return (error); 3431 } else { 3432 /* 3433 * For NFSv2 (or NFSv3 when not one of the above 4 a_names), 3434 * just fake them. 
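 * The faked values below are fixed constants (LINK_MAX, NFS_MAXNAMLEN
 * and so on) rather than anything obtained from the server.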
3435 */ 3436 pc.pc_linkmax = LINK_MAX; 3437 pc.pc_namemax = NFS_MAXNAMLEN; 3438 pc.pc_notrunc = 1; 3439 pc.pc_chownrestricted = 1; 3440 pc.pc_caseinsensitive = 0; 3441 pc.pc_casepreserving = 1; 3442 error = 0; 3443 } 3444 switch (ap->a_name) { 3445 case _PC_LINK_MAX: 3446 *ap->a_retval = pc.pc_linkmax; 3447 break; 3448 case _PC_NAME_MAX: 3449 *ap->a_retval = pc.pc_namemax; 3450 break; 3451 case _PC_PATH_MAX: 3452 *ap->a_retval = PATH_MAX; 3453 break; 3454 case _PC_PIPE_BUF: 3455 *ap->a_retval = PIPE_BUF; 3456 break; 3457 case _PC_CHOWN_RESTRICTED: 3458 *ap->a_retval = pc.pc_chownrestricted; 3459 break; 3460 case _PC_NO_TRUNC: 3461 *ap->a_retval = pc.pc_notrunc; 3462 break; 3463 case _PC_ACL_EXTENDED: 3464 *ap->a_retval = 0; 3465 break; 3466 case _PC_ACL_NFS4: 3467 if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 && 3468 NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL)) 3469 *ap->a_retval = 1; 3470 else 3471 *ap->a_retval = 0; 3472 break; 3473 case _PC_ACL_PATH_MAX: 3474 if (NFS_ISV4(vp)) 3475 *ap->a_retval = ACL_MAX_ENTRIES; 3476 else 3477 *ap->a_retval = 3; 3478 break; 3479 case _PC_MAC_PRESENT: 3480 *ap->a_retval = 0; 3481 break; 3482 case _PC_ASYNC_IO: 3483 /* _PC_ASYNC_IO should have been handled by upper layers. */ 3484 KASSERT(0, ("_PC_ASYNC_IO should not get here")); 3485 error = EINVAL; 3486 break; 3487 case _PC_PRIO_IO: 3488 *ap->a_retval = 0; 3489 break; 3490 case _PC_SYNC_IO: 3491 *ap->a_retval = 0; 3492 break; 3493 case _PC_ALLOC_SIZE_MIN: 3494 *ap->a_retval = vp->v_mount->mnt_stat.f_bsize; 3495 break; 3496 case _PC_FILESIZEBITS: 3497 if (NFS_ISV34(vp)) 3498 *ap->a_retval = 64; 3499 else 3500 *ap->a_retval = 32; 3501 break; 3502 case _PC_REC_INCR_XFER_SIZE: 3503 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize; 3504 break; 3505 case _PC_REC_MAX_XFER_SIZE: 3506 *ap->a_retval = -1; /* means ``unlimited'' */ 3507 break; 3508 case _PC_REC_MIN_XFER_SIZE: 3509 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize; 3510 break; 3511 case _PC_REC_XFER_ALIGN: 3512 *ap->a_retval = PAGE_SIZE; 3513 break; 3514 case _PC_SYMLINK_MAX: 3515 *ap->a_retval = NFS_MAXPATHLEN; 3516 break; 3517 3518 default: 3519 error = EINVAL; 3520 break; 3521 } 3522 return (error); 3523} 3524 3525