/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2013, 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_rdwr_t	vn_io_fault;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_io_fault,
	.fo_write = vn_io_fault,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = vn_seek,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static const int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");

/*
 * Returns true if vn_io_fault mode of handling the i/o request should
 * be used.
 */
static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}
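/*
 * Example (a sketch, not code from this file): a filesystem that
 * implements the vn_io_fault protocol advertises that by setting
 * MNTK_NO_IOPF on its mount point, typically from its VFS_MOUNT()
 * method, under the mount interlock:
 *
 *	MNT_ILOCK(mp);
 *	mp->mnt_kern_flag |= MNTK_NO_IOPF;
 *	MNT_IUNLOCK(mp);
 *
 * Only then does do_vn_io_fault() select the prefaulting path for
 * userspace i/o to regular files on that mount.
 */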
/*
 * Structure used to pass arguments to vn_io_fault1(), to do either
 * file- or vnode-based I/O calls.
 */
struct vn_io_fault_args {
	enum {
		VN_IO_FAULT_FOP,
		VN_IO_FAULT_VOP
	} kind;
	struct ucred *cred;
	int flags;
	union {
		struct fop_args_tag {
			struct file *fp;
			fo_rdwr_t *doio;
		} fop_args;
		struct vop_args_tag {
			struct vnode *vp;
		} vop_args;
	} args;
};

static int vn_io_fault1(struct vnode *vp, struct uio *uio,
    struct vn_io_fault_args *args, struct thread *td);

int
vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp)
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

/*
 * Common code for vnode open operations via a name lookup.
 * Lookup the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if (!(fmode & FWRITE))
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	error = vn_open_vnode(vp, fmode, cred, td, fp);
	if (error)
		goto bad;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}
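/*
 * Typical in-kernel use of vn_open() (a sketch, assuming a
 * caller-initialized nameidata; the pattern mirrors callers such as
 * the core dump and ktrace code):
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
 *	flags = FREAD;
 *	error = vn_open(&nd, &flags, 0, NULL);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		... use nd.ni_vp, which is returned locked ...
 *	}
 */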
/*
 * Common code for vnode open operations once a vnode is located.
 * Check permissions, and call the VOP_OPEN routine.
 */
int
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp)
{
	struct mount *mp;
	accmode_t accmode;
	struct flock lf;
	int error, have_flock, lock_flags, type;

	if (vp->v_type == VLNK)
		return (EMLINK);
	if (vp->v_type == VSOCK)
		return (EOPNOTSUPP);
	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
		return (ENOTDIR);
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		return (error);
#endif
	if ((fmode & O_CREAT) == 0) {
		if (accmode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				return (error);
		}
		if (accmode) {
			error = VOP_ACCESS(vp, accmode, cred, td);
			if (error)
				return (error);
		}
	}
	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
		return (error);

	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		KASSERT(fp != NULL, ("open with flock requires fp"));
		lock_flags = VOP_ISLOCKED(vp);
		VOP_UNLOCK(vp, 0);
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		type = F_FLOCK;
		if ((fmode & FNONBLOCK) == 0)
			type |= F_WAIT;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
		have_flock = (error == 0);
		vn_lock(vp, lock_flags | LK_RETRY);
		if (error == 0 && vp->v_iflag & VI_DOOMED)
			error = ENOENT;
		/*
		 * Another thread might have used this vnode as an
		 * executable while the vnode lock was dropped.
		 * Ensure the vnode is still able to be opened for
		 * writing after the lock has been obtained.
		 */
		if (error == 0 && accmode & VWRITE)
			error = vn_writechk(vp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			if (have_flock) {
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf,
				    F_FLOCK);
			}
			vn_start_write(vp, &mp, V_WAIT);
			vn_lock(vp, lock_flags | LK_RETRY);
			(void)VOP_CLOSE(vp, fmode, cred, td);
			vn_finished_write(mp);
			/* Prevent second close from fdrop()->vn_close(). */
			if (fp != NULL)
				fp->f_ops = &badfileops;
			return (error);
		}
		fp->f_flag |= FHASLOCK;
	}
	if (fmode & FWRITE) {
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	}
	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
	return (0);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(struct vnode *vp)
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (VOP_IS_TEXT(vp))
		return (ETXTBSY);

	return (0);
}
/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags, struct ucred *file_cred,
    struct thread *td)
{
	struct mount *mp;
	int error, lock_flags;

	if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
	    MNT_EXTENDED_SHARED(vp->v_mount))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	if (flags & FWRITE) {
		VNASSERT(vp->v_writecount > 0, vp,
		    ("vn_close: negative writecount"));
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	vput(vp);
	vn_finished_write(mp);
	return (error);
}

/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
	if (fp->f_flag & FRDAHEAD)
		return (fp->f_seqcount << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		fp->f_seqcount += howmany(uio->uio_resid, 16384);
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
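/*
 * Worked example of the arithmetic above (illustrative, not extra
 * policy): a reader that has issued two sequential 64 KB read(2)
 * calls accumulates f_seqcount = 1 + 4 + 4 = 9, so the heuristic
 * returns 9 << IO_SEQSHIFT.  VOP_READ() implementations recover the
 * hint with ioflag >> IO_SEQSHIFT and feed it to the cluster code to
 * scale read-ahead.  One non-sequential access drops the count back
 * to 1, a second to 0.
 */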
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error, lock_flags;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, offset,
			    offset + len);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, offset,
			    offset + len);
		}
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (do_vn_io_fault(vp, &auio)) {
			args.kind = VN_IO_FAULT_VOP;
			args.cred = cred;
			args.flags = ioflg;
			args.args.vop_args.vp = vp;
			error = vn_io_fault1(vp, &auio, &args, td);
		} else if (rw == UIO_READ) {
			error = VOP_READ(vp, &auio, ioflg, cred);
		} else /* if (rw == UIO_WRITE) */ {
			error = VOP_WRITE(vp, &auio, ioflg, cred);
		}
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
		if (mp != NULL)
			vn_finished_write(mp);
	}
 out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}
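/*
 * Typical in-kernel use (a sketch with hypothetical variable names;
 * the pattern mirrors callers such as the kernel linker and image
 * activators):
 *
 *	ssize_t resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, bufsize, off, UIO_SYSSPACE,
 *	    IO_NODELOCKED, td->td_ucred, NOCRED, &resid, td);
 *
 * Passing a non-NULL aresid makes short transfers visible to the
 * caller; with aresid == NULL a short transfer is turned into EIO.
 */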
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before each chunk of vn_rdwr().  We also call
 * kern_yield() to give other processes a chance to lock the vnode
 * (either other processes core'ing the same binary, or unrelated
 * processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len,
    off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, size_t *aresid, struct thread *td)
{
	int error = 0;
	ssize_t iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/*
	 * Caller only wants the current f_offset value.  Assume that
	 * reads of the long and shorter integer types are atomic.
	 */
	if ((flags & FOF_NOLOCK) != 0)
		return (fp->f_offset);
#endif

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	if ((flags & FOF_NOLOCK) != 0) {
		if ((flags & FOF_NOUPDATE) == 0)
			fp->f_offset = val;
		if ((flags & FOF_NEXTOFF) != 0)
			fp->f_nextoff = val;
		return;
	}
#endif

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF) != 0)
		fp->f_nextoff = val;
	if ((flags & FOF_NOLOCK) == 0) {
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}

void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		foffset_unlock(fp, uio->uio_offset, flags);
}
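/*
 * Usage sketch: a file method that consumes and then advances the
 * file offset brackets its work with the pair above (vn_seek() below
 * is a concrete caller):
 *
 *	foffset = foffset_lock(fp, 0);
 *	... compute the new offset ...
 *	foffset_unlock(fp, foffset, error != 0 ? FOF_NOUPDATE : 0);
 *
 * FOF_NOLOCK reads or writes the offset without serializing against
 * concurrent users and only yields a consistent snapshot where a
 * plain load or store of off_t is atomic.
 */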
static int
get_advice(struct file *fp, struct uio *uio)
{
	struct mtx *mtxp;
	int ret;

	ret = POSIX_FADV_NORMAL;
	if (fp->f_advice == NULL)
		return (ret);

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if (uio->uio_offset >= fp->f_advice->fa_start &&
	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
		ret = fp->f_advice->fa_advice;
	mtx_unlock(mtxp);
	return (ret);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
    struct thread *td)
{
	struct vnode *vp;
	struct mtx *mtxp;
	int error, ioflag;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	advice = get_advice(fp, uio);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE read(2).  To optimize the common
		 * case of using POSIX_FADV_NOREUSE with sequential
		 * access, track the previous implicit DONTNEED
		 * request and grow this request to include the
		 * current read(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously read regions of the
		 * file.  This allows filesystem blocks that are
		 * accessed by multiple calls to read(2) to be flushed
		 * once the last read(2) finishes.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}
	return (error);
}
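/*
 * Worked example of the window merge above (illustrative numbers): a
 * NOREUSE reader that has just finished bytes 65536..131071 finds
 * fa_prevend == 65535 from its previous read of 0..65535, so start is
 * pulled back to fa_prevstart == 0 and the DONTNEED request covers
 * 0..131071.  Each subsequent sequential read keeps extending the
 * flushed range.
 */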
/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
    struct thread *td)
{
	struct vnode *vp;
	struct mount *mp;
	struct mtx *mtxp;
	int error, ioflag, lock_flags;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE write(2).  To optimize the
		 * common case of using POSIX_FADV_NOREUSE with
		 * sequential access, track the previous implicit
		 * DONTNEED request and grow this request to include
		 * the current write(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously written regions of the
		 * file.
		 *
		 * Note that the blocks just written are almost
		 * certainly still dirty, so this only works when
		 * VOP_ADVISE() calls from subsequent writes push out
		 * the data written by this write(2) once the backing
		 * buffers are clean.  However, as compared to forcing
		 * IO_DIRECT, this gives much saner behavior.  Write
		 * clustering is still allowed, and clean pages are
		 * merely moved to the cache page queue rather than
		 * outright thrown away.  This means a subsequent
		 * read(2) can still avoid hitting the disk if the
		 * pages have not been reclaimed.
		 *
		 * This does make POSIX_FADV_NOREUSE largely useless
		 * with non-sequential access.  However, sequential
		 * access is the more common use case and the flag is
		 * merely advisory.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}

unlock:
	return (error);
}
/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that the thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * VOP will succeed (ignoring the genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
 * i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in the curthread->td_ma,
 * instead of doing uiomove().  A helper function
 * vn_io_fault_uiomove() converts uiomove request into
 * uiomove_fromphys() over td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */
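/*
 * Sketch of the filesystem side of the protocol (a hypothetical
 * VOP_READ() body; the real thing lives in e.g. the UFS read path):
 *
 *	while (uio->uio_resid > 0 && !done) {
 *		... bread() the next block into bp ...
 *		error = vn_io_fault_uiomove((char *)bp->b_data + blkoff,
 *		    xfersize, uio);
 *		...
 *	}
 *
 * vn_io_fault_uiomove() falls back to plain uiomove() when the thread
 * is not running under vn_io_fault(), so the same code path serves
 * both modes.
 */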
/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{

	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		return ((args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td));
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			return (VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		} else if (uio->uio_rw == UIO_WRITE) {
			return (VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		}
		break;
	}
	panic("vn_io_fault_doio: unknown kind of io %d %d", args->kind,
	    uio->uio_rw);
}

/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
 */
static int
vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
    struct thread *td)
{
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	vm_page_t *prev_td_ma;
	vm_prot_t prot;
	vm_offset_t addr, end;
	size_t len, resid;
	ssize_t adv;
	int error, cnt, save, saveheld, prev_td_ma_cnt;

	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;

	/*
	 * UFS honors the IO_UNIT directive and rolls back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	save = vm_fault_disable_pagefaults();
	error = vn_io_fault_doio(args, uio, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}
		if (len > io_hold_cnt * PAGE_SIZE)
			len = io_hold_cnt * PAGE_SIZE;
		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		if (end < addr) {
			error = EFAULT;
			break;
		}
		cnt = atop(end - trunc_page(addr));
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use a
		 * partial page.  +2 accounts for such a situation.
		 */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			error = EFAULT;
			break;
		}
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = vn_io_fault_doio(args, &short_uio, td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	vm_fault_enable_pagefaults(save);
	free(uio_clone, M_IOV);
	return (error);
}
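/*
 * Sizing note, worked through for the constants above: with
 * io_hold_cnt == 16 and 4 KB pages, each retry chunk transfers at
 * most 64 KB.  A transfer that starts and ends in the middle of a
 * page needs extra page slots beyond the 16 full ones, which is why
 * ma[] is declared with io_hold_cnt + 2 entries and the same bound is
 * passed to vm_fault_quick_hold_pages().
 */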
static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	fo_rdwr_t *doio;
	struct vnode *vp;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error;

	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
	vp = fp->f_vnode;
	foffset_lock_uio(fp, uio, flags);
	if (do_vn_io_fault(vp, uio)) {
		args.kind = VN_IO_FAULT_FOP;
		args.args.fop_args.fp = fp;
		args.args.fop_args.doio = doio;
		args.cred = active_cred;
		args.flags = flags | FOF_OFFSET;
		if (uio->uio_rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		} else if ((fp->f_flag & O_APPEND) != 0 ||
		    (flags & FOF_OFFSET) == 0) {
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		}
		error = vn_io_fault1(vp, uio, &args, td);
		vn_rangelock_unlock(vp, rl_cookie);
	} else {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	}
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}
/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for io->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems that set MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;
	int error, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}

int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
	struct thread *td;
	vm_offset_t iov_base;
	int cnt, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove_fromphys(ma, offset, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
	switch (uio->uio_rw) {
	case UIO_WRITE:
		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
		    offset, cnt);
		break;
	case UIO_READ:
		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
		    cnt);
		break;
	}
	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
	uio->uio_iov->iov_len -= cnt;
	uio->uio_resid -= cnt;
	uio->uio_offset += cnt;
	return (0);
}
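/*
 * Page-advance arithmetic, worked through with illustrative numbers:
 * a copy of adv == 4096 bytes that begins at iov_base == 0x1200 ends
 * at 0x2200, crossing one 4 KB page boundary, so pgadv == 1 and
 * td_ma is advanced past the single fully-consumed page while the
 * still-partial page remains first in the held array.
 */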
/*
 * File table truncate routine.
 */
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	void *rl_cookie;
	int error;

	vp = fp->f_vnode;

	/*
	 * Lock the whole range for truncation.  Otherwise split i/o
	 * might happen partly before and partly after the truncation.
	 */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		goto out1;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_writechk(vp);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *active_cred,
    struct ucred *file_cred, struct thread *td)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atim = vap->va_atime;
	sb->st_mtim = vap->va_mtime;
	sb->st_ctim = vap->va_ctime;
	sb->st_birthtim = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
	switch (vp->v_type) {
	case VDIR:
	case VREG:
		switch (com) {
		case FIONREAD:
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			return (error);
		case FIONBIO:
		case FIOASYNC:
			return (0);
		default:
			return (VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td));
		}
	default:
		return (ENOTTY);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif
		error = VOP_POLL(vp, events, fp->f_cred, td);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock called with no locktype."));
	do {
#ifdef DEBUG_VFS_LOCKS
		KASSERT(vp->v_holdcnt != 0,
		    ("vn_lock %p: zero hold count", vp));
#endif
		error = VOP_LOCK1(vp, flags, file, line);
		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
		KASSERT((flags & LK_RETRY) == 0 || error == 0,
		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
		    flags, error));
		/*
		 * Callers specify LK_RETRY if they wish to get dead vnodes.
		 * If RETRY is not set, we return ENOENT instead.
		 */
		if (error == 0 && vp->v_iflag & VI_DOOMED &&
		    (flags & LK_RETRY) == 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
			break;
		}
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct thread *td)
{
	struct vnode *vp;
	struct flock lf;
	int error;

	vp = fp->f_vnode;
	fp->f_ops = &badfileops;

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK)
		vref(vp);

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		vrele(vp);
	}
	return (error);
}
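/*
 * Usage sketch for callers of vn_lock() (not code from this file):
 * vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) always returns with the vnode
 * locked, even if it was doomed meanwhile; without LK_RETRY the
 * caller must be prepared for an ENOENT return with no lock held:
 *
 *	if ((error = vn_lock(vp, LK_SHARED)) != 0)
 *		return (error);
 */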
/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
static int
vn_start_write_locked(struct mount *mp, int flags)
{
	int error;

	mtx_assert(MNT_MTX(mp), MA_OWNED);
	error = 0;

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp),
			    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);

	return (vn_start_write_locked(mp, flags));
}

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

 retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
	    (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}
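/*
 * Usage sketch (the canonical write bracket used throughout the
 * kernel, not code from this file):
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... modify the vnode ...
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp);
 */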
/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(struct mount *mp)
{

	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed.  If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(struct mount *mp)
{

	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(struct mount *mp, int flags)
{
	int error;

	MNT_ILOCK(mp);
	if (mp->mnt_susp_owner == curthread) {
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);

	/*
	 * Unmount holds a write reference on the mount point.  If we
	 * own busy reference and drain for writers, we deadlock with
	 * the reference draining in the unmount path.  Callers of
	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if
	 * vfs_busy() reference is owned and caller is not in the
	 * unmount context.
	 */
	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		MNT_IUNLOCK(mp);
		return (EBUSY);
	}

	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
		vfs_write_resume(mp, 0);
	return (error);
}
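/*
 * Usage sketch (the pattern used by filesystem snapshot code, with
 * error handling elided):
 *
 *	error = vfs_write_suspend(mp, 0);
 *	if (error == 0) {
 *		... the filesystem is now quiescent ...
 *		vfs_write_resume(mp, 0);
 *	}
 */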
/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(struct mount *mp, int flags)
{

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		mp->mnt_susp_owner = NULL;
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
		curthread->td_pflags &= ~TDP_IGNSUSP;
		if ((flags & VR_START_WRITE) != 0) {
			MNT_REF(mp);
			mp->mnt_writeopcount++;
		}
		MNT_IUNLOCK(mp);
		if ((flags & VR_NO_SUSPCLR) == 0)
			VFS_SUSP_CLEAN(mp);
	} else if ((flags & VR_START_WRITE) != 0) {
		MNT_REF(mp);
		vn_start_write_locked(mp, 0);
	} else {
		MNT_IUNLOCK(mp);
	}
}

/*
 * Helper loop around vfs_write_suspend() for filesystem unmount VFS
 * methods.
 */
int
vfs_write_suspend_umnt(struct mount *mp)
{
	int error;

	KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
	    ("vfs_write_suspend_umnt: recursed"));

	/* dounmount() already called vn_start_write(). */
	for (;;) {
		vn_finished_write(mp);
		error = vfs_write_suspend(mp, 0);
		if (error != 0)
			return (error);
		MNT_ILOCK(mp);
		if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			break;
		MNT_IUNLOCK(mp);
		vn_start_write(NULL, &mp, V_WAIT);
	}
	mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
	wakeup(&mp->mnt_flag);
	MNT_IUNLOCK(mp);
	curthread->td_pflags |= TDP_IGNSUSP;
	return (0);
}

/*
 * Implement kqueues for files by translating them to a vnode operation.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}
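/*
 * Usage sketch (hypothetical attribute name and buffer; the pattern
 * follows in-kernel consumers such as the UFS ACL code):
 *
 *	len = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "posix1e.acl_access", &len, buf, td);
 *
 * On success, len is reduced to the number of bytes actually read.
 */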
/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

static int
vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags,
    struct vnode **rvp)
{

	return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp));
}

int
vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
{

	return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino,
	    lkflags, rvp));
}

int
vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
    int lkflags, struct vnode **rvp)
{
	struct mount *mp;
	int ltype, error;

	ASSERT_VOP_LOCKED(vp, "vn_vget_ino_gen");
	mp = vp->v_mount;
	ltype = VOP_ISLOCKED(vp);
	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
	    ("vn_vget_ino: vp not locked"));
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, ltype | LK_RETRY);
		vfs_rel(mp);
		if (error != 0)
			return (ENOENT);
		if (vp->v_iflag & VI_DOOMED) {
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);
	error = alloc(mp, alloc_arg, lkflags, rvp);
	vfs_unbusy(mp);
	if (*rvp != vp)
		vn_lock(vp, ltype | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		if (error == 0) {
			if (*rvp == vp)
				vunref(vp);
			else
				vput(*rvp);
		}
		error = ENOENT;
	}
	return (error);
}
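/*
 * Usage sketch (how a filesystem's VOP_LOOKUP for ".." typically
 * resolves the parent inode without deadlocking on the vnode lock;
 * the names here are illustrative):
 *
 *	error = vn_vget_ino(dvp, parent_ino, cnp->cn_lkflags, &vp);
 *
 * The directory vnode dvp is temporarily unlocked while the parent is
 * fetched by inode number, then relocked at its original level.
 */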
int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    const struct thread *td)
{

	if (vp->v_type != VREG || td == NULL)
		return (0);
	PROC_LOCK(td->td_proc);
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
		kern_psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		return (EFBIG);
	}
	PROC_UNLOCK(td->td_proc);
	return (0);
}

int
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfmode(td, active_cred, vp, mode));
}

int
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfown(td, active_cred, vp, uid, gid));
}

void
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
{
	vm_object_t object;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_page_remove(object, start, end, 0);
	VM_OBJECT_WUNLOCK(object);
}

int
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
{
	struct vattr va;
	daddr_t bn, bnp;
	uint64_t bsize;
	off_t noff;
	int error;

	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
	    ("Wrong command %lu", cmd));

	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);
	if (vp->v_type != VREG) {
		error = ENOTTY;
		goto unlock;
	}
	error = VOP_GETATTR(vp, &va, cred);
	if (error != 0)
		goto unlock;
	noff = *off;
	if (noff >= va.va_size) {
		error = ENXIO;
		goto unlock;
	}
	bsize = vp->v_mount->mnt_stat.f_iosize;
	for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
		error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
		if (error == EOPNOTSUPP) {
			error = ENOTTY;
			goto unlock;
		}
		if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
		    (bnp != -1 && cmd == FIOSEEKDATA)) {
			noff = bn * bsize;
			if (noff < *off)
				noff = *off;
			goto unlock;
		}
	}
	if (noff > va.va_size)
		noff = va.va_size;
	/* noff == va.va_size.  There is an implicit hole at the end of file. */
	if (cmd == FIOSEEKDATA)
		error = ENXIO;
unlock:
	VOP_UNLOCK(vp, 0);
	if (error == 0)
		*off = noff;
	return (error);
}
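/*
 * Worked example for the scan above (illustrative numbers): on a
 * mount with f_iosize == 32 KB, a file whose first block is allocated
 * and whose second block is sparse answers FIOSEEKHOLE with
 * *off == 0 by returning 32768, the start of the block for which
 * VOP_BMAP() reports a physical block of -1, while FIOSEEKDATA with
 * the same offset returns 0 unchanged.
 */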
int
vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct ucred *cred;
	struct vnode *vp;
	struct vattr vattr;
	off_t foffset, size;
	int error, noneg;

	cred = td->td_ucred;
	vp = fp->f_vnode;
	foffset = foffset_lock(fp, 0);
	noneg = (vp->v_type != VCHR);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (noneg &&
		    (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, cred);
		VOP_UNLOCK(vp, 0);
		if (error)
			break;

		/*
		 * If the file references a disk device, then fetch
		 * the media size and use that to determine the ending
		 * offset.
		 */
		if (vattr.va_size == 0 && vp->v_type == VCHR &&
		    fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
			vattr.va_size = size;
		if (noneg &&
		    (vattr.va_size > OFF_MAX ||
		    (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += vattr.va_size;
		break;
	case L_SET:
		break;
	case SEEK_DATA:
		error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
		break;
	case SEEK_HOLE:
		error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
		break;
	default:
		error = EINVAL;
	}
	if (error == 0 && noneg && offset < 0)
		error = EINVAL;
	if (error != 0)
		goto drop;
	VFS_KNOTE_UNLOCKED(vp, 0);
	*(off_t *)(td->td_retval) = offset;
drop:
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

int
vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	int error;

	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);

	/*
	 * From utimes(2):
	 * Grant permission if the caller is the owner of the file or
	 * the super-user.  If the time pointer is null, then write
	 * permission on the file is also sufficient.
	 *
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
	 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
	 * will be allowed to set the times [..] to the current
	 * server time.
	 */
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	return (error);
}