vfs_subr.c revision 279685
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/vfs_subr.c 279685 2015-03-06 09:22:05Z kib $");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void delmntque(struct vnode *vp);
static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
    int slpflag, int slptimeo);
static void syncer_shutdown(void *arg, int howto);
static int vtryrecycle(struct vnode *vp);
static void v_incr_usecount(struct vnode *);
static void v_decr_usecount(struct vnode *);
static void v_decr_useonly(struct vnode *);
static void v_upgrade_usecount(struct vnode *);
static void vnlru_free(int);
static void vgonel(struct vnode *);
static void vfs_knllock(void *arg);
static void vfs_knlunlock(void *arg);
static void vfs_knl_assert_locked(void *arg);
static void vfs_knl_assert_unlocked(void *arg);
static void destroy_vpollinfo(struct vpollinfo *vi);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
 */
static unsigned long numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
        VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
        VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[10] = {
        0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
        S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Free vnode target.  Free vnodes may simply be files which have been stat'd
 * but not read.  This is somewhat common, and a small cache of such files
 * should be kept to avoid recreation costs.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list.
*/ 151static u_long freevnodes; 152SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, 153 "Number of vnodes in the free list"); 154 155static int vlru_allow_cache_src; 156SYSCTL_INT(_vfs, OID_AUTO, vlru_allow_cache_src, CTLFLAG_RW, 157 &vlru_allow_cache_src, 0, "Allow vlru to reclaim source vnode"); 158 159/* 160 * Various variables used for debugging the new implementation of 161 * reassignbuf(). 162 * XXX these are probably of (very) limited utility now. 163 */ 164static int reassignbufcalls; 165SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, 166 "Number of calls to reassignbuf"); 167 168/* 169 * Cache for the mount type id assigned to NFS. This is used for 170 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c. 171 */ 172int nfs_mount_type = -1; 173 174/* To keep more than one thread at a time from running vfs_getnewfsid */ 175static struct mtx mntid_mtx; 176 177/* 178 * Lock for any access to the following: 179 * vnode_free_list 180 * numvnodes 181 * freevnodes 182 */ 183static struct mtx vnode_free_list_mtx; 184 185/* Publicly exported FS */ 186struct nfs_public nfs_pub; 187 188static uma_zone_t buf_trie_zone; 189 190/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 191static uma_zone_t vnode_zone; 192static uma_zone_t vnodepoll_zone; 193 194/* 195 * The workitem queue. 196 * 197 * It is useful to delay writes of file data and filesystem metadata 198 * for tens of seconds so that quickly created and deleted files need 199 * not waste disk bandwidth being created and removed. To realize this, 200 * we append vnodes to a "workitem" queue. When running with a soft 201 * updates implementation, most pending metadata dependencies should 202 * not wait for more than a few seconds. Thus, mounted on block devices 203 * are delayed only about a half the time that file data is delayed. 204 * Similarly, directory updates are more critical, so are only delayed 205 * about a third the time that file data is delayed. Thus, there are 206 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 207 * one each second (driven off the filesystem syncer process). The 208 * syncer_delayno variable indicates the next queue that is to be processed. 
209 * Items that need to be processed soon are placed in this queue: 210 * 211 * syncer_workitem_pending[syncer_delayno] 212 * 213 * A delay of fifteen seconds is done by placing the request fifteen 214 * entries later in the queue: 215 * 216 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 217 * 218 */ 219static int syncer_delayno; 220static long syncer_mask; 221LIST_HEAD(synclist, bufobj); 222static struct synclist *syncer_workitem_pending; 223/* 224 * The sync_mtx protects: 225 * bo->bo_synclist 226 * sync_vnode_count 227 * syncer_delayno 228 * syncer_state 229 * syncer_workitem_pending 230 * syncer_worklist_len 231 * rushjob 232 */ 233static struct mtx sync_mtx; 234static struct cv sync_wakeup; 235 236#define SYNCER_MAXDELAY 32 237static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 238static int syncdelay = 30; /* max time to delay syncing data */ 239static int filedelay = 30; /* time to delay syncing files */ 240SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 241 "Time to delay syncing files (in seconds)"); 242static int dirdelay = 29; /* time to delay syncing directories */ 243SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 244 "Time to delay syncing directories (in seconds)"); 245static int metadelay = 28; /* time to delay syncing metadata */ 246SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 247 "Time to delay syncing metadata (in seconds)"); 248static int rushjob; /* number of slots to run ASAP */ 249static int stat_rush_requests; /* number of times I/O speeded up */ 250SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 251 "Number of times I/O speeded up (rush requests)"); 252 253/* 254 * When shutting down the syncer, run it at four times normal speed. 255 */ 256#define SYNCER_SHUTDOWN_SPEEDUP 4 257static int sync_vnode_count; 258static int syncer_worklist_len; 259static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 260 syncer_state; 261 262/* 263 * Number of vnodes we want to exist at any one time. This is mostly used 264 * to size hash tables in vnode-related code. It is normally not used in 265 * getnewvnode(), as wantfreevnodes is normally nonzero.) 266 * 267 * XXX desiredvnodes is historical cruft and should not exist. 268 */ 269int desiredvnodes; 270SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, 271 &desiredvnodes, 0, "Maximum number of vnodes"); 272SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 273 &wantfreevnodes, 0, "Minimum number of vnodes (legacy)"); 274static int vnlru_nowhere; 275SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 276 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 277 278/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 279static int vnsz2log; 280 281/* 282 * Support for the bufobj clean & dirty pctrie. 283 */ 284static void * 285buf_trie_alloc(struct pctrie *ptree) 286{ 287 288 return uma_zalloc(buf_trie_zone, M_NOWAIT); 289} 290 291static void 292buf_trie_free(struct pctrie *ptree, void *node) 293{ 294 295 uma_zfree(buf_trie_zone, node); 296} 297PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); 298 299/* 300 * Initialize the vnode management data structures. 301 * 302 * Reevaluate the following cap on the number of vnodes after the physical 303 * memory size exceeds 512GB. In the limit, as the physical memory size 304 * grows, the ratio of physical pages to vnodes approaches sixteen to one. 
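/*
 * Editor's illustrative sketch (not part of the original file): a small
 * user-space model of the desiredvnodes sizing policy that vntblinit()
 * below implements.  The structure sizes used here are hypothetical
 * stand-ins; only the shape of the formula (roughly one vnode per four
 * pages for the first 98,304*4 pages, one per sixteen pages beyond that,
 * and never more than one seventh of the kernel heap) is taken from the
 * code that follows.
 */
#include <stdio.h>

#define TOY_PAGE_SIZE   4096UL
/* Hypothetical per-object costs; the kernel uses the real struct sizes. */
#define TOY_VMOBJ_SIZE  232UL
#define TOY_VNODE_SIZE  480UL
#define TOY_MAXVNODES   (512UL * (1024 * 1024 * 1024 / TOY_PAGE_SIZE / 16))

static unsigned long
toy_min(unsigned long a, unsigned long b)
{
        return (a < b ? a : b);
}

static unsigned long
toy_desiredvnodes(unsigned long maxproc, unsigned long page_count,
    unsigned long kmem_size)
{
        unsigned long physvnodes, virtvnodes, desired;

        /* 1:4 ratio at first, 1:16 marginal ratio past 98,304 vnodes. */
        physvnodes = maxproc + page_count / 16 +
            3 * toy_min(98304UL * 4, page_count) / 16;
        /* Vnodes plus VM objects may not exceed 1/7 of the kernel heap. */
        virtvnodes = kmem_size / (7 * (TOY_VMOBJ_SIZE + TOY_VNODE_SIZE));
        desired = toy_min(physvnodes, virtvnodes);
        return (toy_min(desired, TOY_MAXVNODES));
}

int
main(void)
{
        /* Example: 4 GB of RAM, a 1 GB kernel heap, maxproc of 6000. */
        printf("desiredvnodes ~ %lu\n",
            toy_desiredvnodes(6000, (4UL << 30) / TOY_PAGE_SIZE, 1UL << 30));
        return (0);
}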
305 */ 306#ifndef MAXVNODES_MAX 307#define MAXVNODES_MAX (512 * (1024 * 1024 * 1024 / (int)PAGE_SIZE / 16)) 308#endif 309static void 310vntblinit(void *dummy __unused) 311{ 312 u_int i; 313 int physvnodes, virtvnodes; 314 315 /* 316 * Desiredvnodes is a function of the physical memory size and the 317 * kernel's heap size. Generally speaking, it scales with the 318 * physical memory size. The ratio of desiredvnodes to physical pages 319 * is one to four until desiredvnodes exceeds 98,304. Thereafter, the 320 * marginal ratio of desiredvnodes to physical pages is one to 321 * sixteen. However, desiredvnodes is limited by the kernel's heap 322 * size. The memory required by desiredvnodes vnodes and vm objects 323 * may not exceed one seventh of the kernel's heap size. 324 */ 325 physvnodes = maxproc + cnt.v_page_count / 16 + 3 * min(98304 * 4, 326 cnt.v_page_count) / 16; 327 virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) + 328 sizeof(struct vnode))); 329 desiredvnodes = min(physvnodes, virtvnodes); 330 if (desiredvnodes > MAXVNODES_MAX) { 331 if (bootverbose) 332 printf("Reducing kern.maxvnodes %d -> %d\n", 333 desiredvnodes, MAXVNODES_MAX); 334 desiredvnodes = MAXVNODES_MAX; 335 } 336 wantfreevnodes = desiredvnodes / 4; 337 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 338 TAILQ_INIT(&vnode_free_list); 339 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 340 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 341 NULL, NULL, UMA_ALIGN_PTR, 0); 342 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 343 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 344 /* 345 * Preallocate enough nodes to support one-per buf so that 346 * we can not fail an insert. reassignbuf() callers can not 347 * tolerate the insertion failure. 348 */ 349 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 350 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 351 UMA_ZONE_NOFREE | UMA_ZONE_VM); 352 uma_prealloc(buf_trie_zone, nbuf); 353 /* 354 * Initialize the filesystem syncer. 355 */ 356 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 357 &syncer_mask); 358 syncer_maxdelay = syncer_mask + 1; 359 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 360 cv_init(&sync_wakeup, "syncer"); 361 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 362 vnsz2log++; 363 vnsz2log--; 364} 365SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 366 367 368/* 369 * Mark a mount point as busy. Used to synchronize access and to delay 370 * unmounting. Eventually, mountlist_mtx is not released on failure. 371 * 372 * vfs_busy() is a custom lock, it can block the caller. 373 * vfs_busy() only sleeps if the unmount is active on the mount point. 374 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 375 * vnode belonging to mp. 376 * 377 * Lookup uses vfs_busy() to traverse mount points. 378 * root fs var fs 379 * / vnode lock A / vnode lock (/var) D 380 * /var vnode lock B /log vnode lock(/var/log) E 381 * vfs_busy lock C vfs_busy lock F 382 * 383 * Within each file system, the lock order is C->A->B and F->D->E. 
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *    C->A->B
 *          |
 *          +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *    VOP_LOOKUP() obtains B while A is held
 *    vfs_busy() obtains a shared lock on F while A and B are held
 *    vput() releases lock on B
 *    vput() releases lock on A
 *    VFS_ROOT() obtains lock on D while shared lock on F is held
 *    vfs_unbusy() releases shared lock on F
 *    vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *      Attempting to lock A (instead of vp_crossmp) while D is held would
 *      violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

        MPASS((flags & ~MBF_MASK) == 0);
        CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

        MNT_ILOCK(mp);
        MNT_REF(mp);
        /*
         * If the mount point is currently being unmounted, sleep until the
         * mount point's fate is decided.  If the thread doing the unmounting
         * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
         * indicating that this mount point has survived the unmount attempt
         * and vfs_busy should retry.  Otherwise the unmounter thread will set
         * the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating
         * that the mount point is about to be really destroyed.  vfs_busy
         * needs to release its reference on the mount point in this case and
         * return with ENOENT, telling the caller that the mount it tried to
         * busy is no longer valid.
         */
        while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
                        MNT_REL(mp);
                        MNT_IUNLOCK(mp);
                        CTR1(KTR_VFS, "%s: failed busying before sleeping",
                            __func__);
                        return (ENOENT);
                }
                if (flags & MBF_MNTLSTLOCK)
                        mtx_unlock(&mountlist_mtx);
                mp->mnt_kern_flag |= MNTK_MWAIT;
                msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
                if (flags & MBF_MNTLSTLOCK)
                        mtx_lock(&mountlist_mtx);
                MNT_ILOCK(mp);
        }
        if (flags & MBF_MNTLSTLOCK)
                mtx_unlock(&mountlist_mtx);
        mp->mnt_lockref++;
        MNT_IUNLOCK(mp);
        return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{

        CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
        MNT_ILOCK(mp);
        MNT_REL(mp);
        KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
        mp->mnt_lockref--;
        if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
                MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
                CTR1(KTR_VFS, "%s: waking up waiters", __func__);
                mp->mnt_kern_flag &= ~MNTK_DRAINING;
                wakeup(&mp->mnt_lockref);
        }
        MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;

        CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
        mtx_lock(&mountlist_mtx);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        vfs_ref(mp);
                        mtx_unlock(&mountlist_mtx);
                        return (mp);
                }
        }
        mtx_unlock(&mountlist_mtx);
        CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
        return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
495 * 496 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 497 * cache for popular filesystem identifiers. The cache is lockess, using 498 * the fact that struct mount's are never freed. In worst case we may 499 * get pointer to unmounted or even different filesystem, so we have to 500 * check what we got, and go slow way if so. 501 */ 502struct mount * 503vfs_busyfs(fsid_t *fsid) 504{ 505#define FSID_CACHE_SIZE 256 506 typedef struct mount * volatile vmp_t; 507 static vmp_t cache[FSID_CACHE_SIZE]; 508 struct mount *mp; 509 int error; 510 uint32_t hash; 511 512 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 513 hash = fsid->val[0] ^ fsid->val[1]; 514 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 515 mp = cache[hash]; 516 if (mp == NULL || 517 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || 518 mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) 519 goto slow; 520 if (vfs_busy(mp, 0) != 0) { 521 cache[hash] = NULL; 522 goto slow; 523 } 524 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 525 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) 526 return (mp); 527 else 528 vfs_unbusy(mp); 529 530slow: 531 mtx_lock(&mountlist_mtx); 532 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 533 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 534 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 535 error = vfs_busy(mp, MBF_MNTLSTLOCK); 536 if (error) { 537 cache[hash] = NULL; 538 mtx_unlock(&mountlist_mtx); 539 return (NULL); 540 } 541 cache[hash] = mp; 542 return (mp); 543 } 544 } 545 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 546 mtx_unlock(&mountlist_mtx); 547 return ((struct mount *) 0); 548} 549 550/* 551 * Check if a user can access privileged mount options. 552 */ 553int 554vfs_suser(struct mount *mp, struct thread *td) 555{ 556 int error; 557 558 /* 559 * If the thread is jailed, but this is not a jail-friendly file 560 * system, deny immediately. 561 */ 562 if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred)) 563 return (EPERM); 564 565 /* 566 * If the file system was mounted outside the jail of the calling 567 * thread, deny immediately. 568 */ 569 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 570 return (EPERM); 571 572 /* 573 * If file system supports delegated administration, we don't check 574 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 575 * by the file system itself. 576 * If this is not the user that did original mount, we check for 577 * the PRIV_VFS_MOUNT_OWNER privilege. 578 */ 579 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 580 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 581 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 582 return (error); 583 } 584 return (0); 585} 586 587/* 588 * Get a new unique fsid. Try to make its val[0] unique, since this value 589 * will be used to create fake device numbers for stat(). Also try (but 590 * not so hard) make its val[0] unique mod 2^16, since some emulators only 591 * support 16-bit device numbers. We end up with unique val[0]'s for the 592 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 593 * 594 * Keep in mind that several mounts may be running in parallel. Starting 595 * the search one past where the previous search terminated is both a 596 * micro-optimization and a defense against returning the same fsid to 597 * different mounts. 
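/*
 * Editor's illustrative sketch (not part of the original file): how
 * vfs_getnewfsid() below packs a filesystem type number and a 16-bit
 * mount id counter into val[0], modeled in user space.  The low byte of
 * the counter lands in bits 0-7 and the high byte in bits 16-23, which
 * is why val[0] stays unique mod 2^16 for the first 2^8 calls; the
 * kernel additionally wraps this value in makedev(255, ...).  The toy_*
 * names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
toy_fsid_val0(int vfc_typenum, uint16_t mntid)
{
        uint32_t mtype;

        mtype = ((uint32_t)vfc_typenum & 0xFF) << 24;
        return (mtype | ((mntid & 0xFF00) << 8) | (mntid & 0xFF));
}

int
main(void)
{
        uint16_t mntid_base = 0;
        int i;

        /* First few ids for a hypothetical filesystem type number 35. */
        for (i = 0; i < 3; i++)
                printf("val[0] = %#x\n", toy_fsid_val0(35, mntid_base++));
        return (0);
}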
598 */ 599void 600vfs_getnewfsid(struct mount *mp) 601{ 602 static uint16_t mntid_base; 603 struct mount *nmp; 604 fsid_t tfsid; 605 int mtype; 606 607 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 608 mtx_lock(&mntid_mtx); 609 mtype = mp->mnt_vfc->vfc_typenum; 610 tfsid.val[1] = mtype; 611 mtype = (mtype & 0xFF) << 24; 612 for (;;) { 613 tfsid.val[0] = makedev(255, 614 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 615 mntid_base++; 616 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 617 break; 618 vfs_rel(nmp); 619 } 620 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 621 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 622 mtx_unlock(&mntid_mtx); 623} 624 625/* 626 * Knob to control the precision of file timestamps: 627 * 628 * 0 = seconds only; nanoseconds zeroed. 629 * 1 = seconds and nanoseconds, accurate within 1/HZ. 630 * 2 = seconds and nanoseconds, truncated to microseconds. 631 * >=3 = seconds and nanoseconds, maximum precision. 632 */ 633enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 634 635static int timestamp_precision = TSP_SEC; 636SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 637 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 638 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to ms, " 639 "3+: sec + ns (max. precision))"); 640 641/* 642 * Get a current timestamp. 643 */ 644void 645vfs_timestamp(struct timespec *tsp) 646{ 647 struct timeval tv; 648 649 switch (timestamp_precision) { 650 case TSP_SEC: 651 tsp->tv_sec = time_second; 652 tsp->tv_nsec = 0; 653 break; 654 case TSP_HZ: 655 getnanotime(tsp); 656 break; 657 case TSP_USEC: 658 microtime(&tv); 659 TIMEVAL_TO_TIMESPEC(&tv, tsp); 660 break; 661 case TSP_NSEC: 662 default: 663 nanotime(tsp); 664 break; 665 } 666} 667 668/* 669 * Set vnode attributes to VNOVAL 670 */ 671void 672vattr_null(struct vattr *vap) 673{ 674 675 vap->va_type = VNON; 676 vap->va_size = VNOVAL; 677 vap->va_bytes = VNOVAL; 678 vap->va_mode = VNOVAL; 679 vap->va_nlink = VNOVAL; 680 vap->va_uid = VNOVAL; 681 vap->va_gid = VNOVAL; 682 vap->va_fsid = VNOVAL; 683 vap->va_fileid = VNOVAL; 684 vap->va_blocksize = VNOVAL; 685 vap->va_rdev = VNOVAL; 686 vap->va_atime.tv_sec = VNOVAL; 687 vap->va_atime.tv_nsec = VNOVAL; 688 vap->va_mtime.tv_sec = VNOVAL; 689 vap->va_mtime.tv_nsec = VNOVAL; 690 vap->va_ctime.tv_sec = VNOVAL; 691 vap->va_ctime.tv_nsec = VNOVAL; 692 vap->va_birthtime.tv_sec = VNOVAL; 693 vap->va_birthtime.tv_nsec = VNOVAL; 694 vap->va_flags = VNOVAL; 695 vap->va_gen = VNOVAL; 696 vap->va_vaflags = 0; 697} 698 699/* 700 * This routine is called when we have too many vnodes. It attempts 701 * to free <count> vnodes and will potentially free vnodes that still 702 * have VM backing store (VM backing store is typically the cause 703 * of a vnode blowout so we want to do this). Therefore, this operation 704 * is not considered cheap. 705 * 706 * A number of conditions may prevent a vnode from being reclaimed. 707 * the buffer cache may have references on the vnode, a directory 708 * vnode may still have references due to the namei cache representing 709 * underlying files, or the vnode may be in active use. It is not 710 * desireable to reuse such vnodes. These conditions may cause the 711 * number of vnodes to reach some minimum value regardless of what 712 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 
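/*
 * Editor's illustrative sketch (not part of the original file): the
 * pacing used by vlrureclaim() below, in user-space form.  One pass
 * visits roughly a tenth of a mount's vnode list, and a vnode whose VM
 * object caches more than "trigger" resident pages is skipped, so that
 * recycling frees vnodes rather than throwing away page cache.
 */
#include <stdio.h>

/* How many resident pages a vnode may cache and still be recycled. */
static int
toy_trigger(int page_count, int desiredvnodes)
{
        int usevnodes;

        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        return (page_count * 2 / usevnodes);
}

/* How many vnodes one pass will examine on a given mount. */
static int
toy_pass_length(int nvnodelistsize)
{
        return (nvnodelistsize / 10 + 1);
}

int
main(void)
{
        /* Example: 1M pages of RAM and ~145k desired vnodes. */
        printf("trigger = %d resident pages\n", toy_trigger(1048576, 145264));
        printf("pass length for 50000 vnodes = %d\n", toy_pass_length(50000));
        return (0);
}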
713 */ 714static int 715vlrureclaim(struct mount *mp) 716{ 717 struct vnode *vp; 718 int done; 719 int trigger; 720 int usevnodes; 721 int count; 722 723 /* 724 * Calculate the trigger point, don't allow user 725 * screwups to blow us up. This prevents us from 726 * recycling vnodes with lots of resident pages. We 727 * aren't trying to free memory, we are trying to 728 * free vnodes. 729 */ 730 usevnodes = desiredvnodes; 731 if (usevnodes <= 0) 732 usevnodes = 1; 733 trigger = cnt.v_page_count * 2 / usevnodes; 734 done = 0; 735 vn_start_write(NULL, &mp, V_WAIT); 736 MNT_ILOCK(mp); 737 count = mp->mnt_nvnodelistsize / 10 + 1; 738 while (count != 0) { 739 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 740 while (vp != NULL && vp->v_type == VMARKER) 741 vp = TAILQ_NEXT(vp, v_nmntvnodes); 742 if (vp == NULL) 743 break; 744 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 745 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 746 --count; 747 if (!VI_TRYLOCK(vp)) 748 goto next_iter; 749 /* 750 * If it's been deconstructed already, it's still 751 * referenced, or it exceeds the trigger, skip it. 752 */ 753 if (vp->v_usecount || 754 (!vlru_allow_cache_src && 755 !LIST_EMPTY(&(vp)->v_cache_src)) || 756 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && 757 vp->v_object->resident_page_count > trigger)) { 758 VI_UNLOCK(vp); 759 goto next_iter; 760 } 761 MNT_IUNLOCK(mp); 762 vholdl(vp); 763 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { 764 vdrop(vp); 765 goto next_iter_mntunlocked; 766 } 767 VI_LOCK(vp); 768 /* 769 * v_usecount may have been bumped after VOP_LOCK() dropped 770 * the vnode interlock and before it was locked again. 771 * 772 * It is not necessary to recheck VI_DOOMED because it can 773 * only be set by another thread that holds both the vnode 774 * lock and vnode interlock. If another thread has the 775 * vnode lock before we get to VOP_LOCK() and obtains the 776 * vnode interlock after VOP_LOCK() drops the vnode 777 * interlock, the other thread will be unable to drop the 778 * vnode lock before our VOP_LOCK() call fails. 779 */ 780 if (vp->v_usecount || 781 (!vlru_allow_cache_src && 782 !LIST_EMPTY(&(vp)->v_cache_src)) || 783 (vp->v_object != NULL && 784 vp->v_object->resident_page_count > trigger)) { 785 VOP_UNLOCK(vp, LK_INTERLOCK); 786 vdrop(vp); 787 goto next_iter_mntunlocked; 788 } 789 KASSERT((vp->v_iflag & VI_DOOMED) == 0, 790 ("VI_DOOMED unexpectedly detected in vlrureclaim()")); 791 vgonel(vp); 792 VOP_UNLOCK(vp, 0); 793 vdropl(vp); 794 done++; 795next_iter_mntunlocked: 796 if (!should_yield()) 797 goto relock_mnt; 798 goto yield; 799next_iter: 800 if (!should_yield()) 801 continue; 802 MNT_IUNLOCK(mp); 803yield: 804 kern_yield(PRI_USER); 805relock_mnt: 806 MNT_ILOCK(mp); 807 } 808 MNT_IUNLOCK(mp); 809 vn_finished_write(mp); 810 return done; 811} 812 813/* 814 * Attempt to keep the free list at wantfreevnodes length. 815 */ 816static void 817vnlru_free(int count) 818{ 819 struct vnode *vp; 820 821 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 822 for (; count > 0; count--) { 823 vp = TAILQ_FIRST(&vnode_free_list); 824 /* 825 * The list can be modified while the free_list_mtx 826 * has been dropped and vp could be NULL here. 
827 */ 828 if (!vp) 829 break; 830 VNASSERT(vp->v_op != NULL, vp, 831 ("vnlru_free: vnode already reclaimed.")); 832 KASSERT((vp->v_iflag & VI_FREE) != 0, 833 ("Removing vnode not on freelist")); 834 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 835 ("Mangling active vnode")); 836 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 837 /* 838 * Don't recycle if we can't get the interlock. 839 */ 840 if (!VI_TRYLOCK(vp)) { 841 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 842 continue; 843 } 844 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, 845 vp, ("vp inconsistent on freelist")); 846 847 /* 848 * The clear of VI_FREE prevents activation of the 849 * vnode. There is no sense in putting the vnode on 850 * the mount point active list, only to remove it 851 * later during recycling. Inline the relevant part 852 * of vholdl(), to avoid triggering assertions or 853 * activating. 854 */ 855 freevnodes--; 856 vp->v_iflag &= ~VI_FREE; 857 vp->v_holdcnt++; 858 859 mtx_unlock(&vnode_free_list_mtx); 860 VI_UNLOCK(vp); 861 vtryrecycle(vp); 862 /* 863 * If the recycled succeeded this vdrop will actually free 864 * the vnode. If not it will simply place it back on 865 * the free list. 866 */ 867 vdrop(vp); 868 mtx_lock(&vnode_free_list_mtx); 869 } 870} 871/* 872 * Attempt to recycle vnodes in a context that is always safe to block. 873 * Calling vlrurecycle() from the bowels of filesystem code has some 874 * interesting deadlock problems. 875 */ 876static struct proc *vnlruproc; 877static int vnlruproc_sig; 878 879static void 880vnlru_proc(void) 881{ 882 struct mount *mp, *nmp; 883 int done; 884 struct proc *p = vnlruproc; 885 886 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p, 887 SHUTDOWN_PRI_FIRST); 888 889 for (;;) { 890 kproc_suspend_check(p); 891 mtx_lock(&vnode_free_list_mtx); 892 if (freevnodes > wantfreevnodes) 893 vnlru_free(freevnodes - wantfreevnodes); 894 if (numvnodes <= desiredvnodes * 9 / 10) { 895 vnlruproc_sig = 0; 896 wakeup(&vnlruproc_sig); 897 msleep(vnlruproc, &vnode_free_list_mtx, 898 PVFS|PDROP, "vlruwt", hz); 899 continue; 900 } 901 mtx_unlock(&vnode_free_list_mtx); 902 done = 0; 903 mtx_lock(&mountlist_mtx); 904 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 905 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { 906 nmp = TAILQ_NEXT(mp, mnt_list); 907 continue; 908 } 909 done += vlrureclaim(mp); 910 mtx_lock(&mountlist_mtx); 911 nmp = TAILQ_NEXT(mp, mnt_list); 912 vfs_unbusy(mp); 913 } 914 mtx_unlock(&mountlist_mtx); 915 if (done == 0) { 916#if 0 917 /* These messages are temporary debugging aids */ 918 if (vnlru_nowhere < 5) 919 printf("vnlru process getting nowhere..\n"); 920 else if (vnlru_nowhere == 5) 921 printf("vnlru process messages stopped.\n"); 922#endif 923 vnlru_nowhere++; 924 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 925 } else 926 kern_yield(PRI_USER); 927 } 928} 929 930static struct kproc_desc vnlru_kp = { 931 "vnlru", 932 vnlru_proc, 933 &vnlruproc 934}; 935SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 936 &vnlru_kp); 937 938/* 939 * Routines having to do with the management of the vnode table. 940 */ 941 942/* 943 * Try to recycle a freed vnode. We abort if anyone picks up a reference 944 * before we actually vgone(). This function must be called with the vnode 945 * held to prevent the vnode from being returned to the free list midway 946 * through vgone(). 
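/*
 * Editor's illustrative sketch (not part of the original file): a toy
 * model of why vnlru_free() above acquires a hold before calling
 * vtryrecycle() (defined below).  While the hold count is above zero the
 * vnode cannot be returned to the free list or freed, so the recycling
 * attempt cannot race with the vnode being handed out again.  All toy_*
 * names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_vnode {
        int     usecount;       /* active references, like v_usecount */
        int     holdcnt;        /* keep-alive references, like v_holdcnt */
        bool    onfreelist;     /* like VI_FREE */
        bool    doomed;         /* like VI_DOOMED after vgone() */
};

static void
toy_try_recycle(struct toy_vnode *vp)
{
        /* Abort if someone picked up a reference after we chose it. */
        if (vp->usecount > 0)
                return;
        vp->doomed = true;              /* vgonel() analogue */
}

static void
toy_vdrop(struct toy_vnode *vp)
{
        vp->holdcnt--;
        if (vp->holdcnt == 0 && !vp->doomed)
                vp->onfreelist = true;  /* back on the free list */
        /* holdcnt == 0 && doomed would actually free the vnode. */
}

int
main(void)
{
        struct toy_vnode vn = { 0, 0, true, false };

        /* vnlru_free(): take it off the free list and hold it first. */
        vn.onfreelist = false;
        vn.holdcnt++;
        toy_try_recycle(&vn);
        toy_vdrop(&vn);
        printf("doomed=%d onfreelist=%d\n", vn.doomed, vn.onfreelist);
        return (0);
}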
947 */ 948static int 949vtryrecycle(struct vnode *vp) 950{ 951 struct mount *vnmp; 952 953 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 954 VNASSERT(vp->v_holdcnt, vp, 955 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 956 /* 957 * This vnode may found and locked via some other list, if so we 958 * can't recycle it yet. 959 */ 960 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 961 CTR2(KTR_VFS, 962 "%s: impossible to recycle, vp %p lock is already held", 963 __func__, vp); 964 return (EWOULDBLOCK); 965 } 966 /* 967 * Don't recycle if its filesystem is being suspended. 968 */ 969 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 970 VOP_UNLOCK(vp, 0); 971 CTR2(KTR_VFS, 972 "%s: impossible to recycle, cannot start the write for %p", 973 __func__, vp); 974 return (EBUSY); 975 } 976 /* 977 * If we got this far, we need to acquire the interlock and see if 978 * anyone picked up this vnode from another list. If not, we will 979 * mark it with DOOMED via vgonel() so that anyone who does find it 980 * will skip over it. 981 */ 982 VI_LOCK(vp); 983 if (vp->v_usecount) { 984 VOP_UNLOCK(vp, LK_INTERLOCK); 985 vn_finished_write(vnmp); 986 CTR2(KTR_VFS, 987 "%s: impossible to recycle, %p is already referenced", 988 __func__, vp); 989 return (EBUSY); 990 } 991 if ((vp->v_iflag & VI_DOOMED) == 0) 992 vgonel(vp); 993 VOP_UNLOCK(vp, LK_INTERLOCK); 994 vn_finished_write(vnmp); 995 return (0); 996} 997 998/* 999 * Wait for available vnodes. 1000 */ 1001static int 1002getnewvnode_wait(int suspended) 1003{ 1004 1005 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1006 if (numvnodes > desiredvnodes) { 1007 if (suspended) { 1008 /* 1009 * File system is beeing suspended, we cannot risk a 1010 * deadlock here, so allocate new vnode anyway. 1011 */ 1012 if (freevnodes > wantfreevnodes) 1013 vnlru_free(freevnodes - wantfreevnodes); 1014 return (0); 1015 } 1016 if (vnlruproc_sig == 0) { 1017 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1018 wakeup(vnlruproc); 1019 } 1020 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1021 "vlruwk", hz); 1022 } 1023 return (numvnodes > desiredvnodes ? ENFILE : 0); 1024} 1025 1026void 1027getnewvnode_reserve(u_int count) 1028{ 1029 struct thread *td; 1030 1031 td = curthread; 1032 /* First try to be quick and racy. */ 1033 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1034 td->td_vp_reserv += count; 1035 return; 1036 } else 1037 atomic_subtract_long(&numvnodes, count); 1038 1039 mtx_lock(&vnode_free_list_mtx); 1040 while (count > 0) { 1041 if (getnewvnode_wait(0) == 0) { 1042 count--; 1043 td->td_vp_reserv++; 1044 atomic_add_long(&numvnodes, 1); 1045 } 1046 } 1047 mtx_unlock(&vnode_free_list_mtx); 1048} 1049 1050void 1051getnewvnode_drop_reserve(void) 1052{ 1053 struct thread *td; 1054 1055 td = curthread; 1056 atomic_subtract_long(&numvnodes, td->td_vp_reserv); 1057 td->td_vp_reserv = 0; 1058} 1059 1060/* 1061 * Return the next vnode from the free list. 1062 */ 1063int 1064getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1065 struct vnode **vpp) 1066{ 1067 struct vnode *vp; 1068 struct bufobj *bo; 1069 struct thread *td; 1070 int error; 1071 1072 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1073 vp = NULL; 1074 td = curthread; 1075 if (td->td_vp_reserv > 0) { 1076 td->td_vp_reserv -= 1; 1077 goto alloc; 1078 } 1079 mtx_lock(&vnode_free_list_mtx); 1080 /* 1081 * Lend our context to reclaim vnodes if they've exceeded the max. 
1082 */ 1083 if (freevnodes > wantfreevnodes) 1084 vnlru_free(1); 1085 error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & 1086 MNTK_SUSPEND)); 1087#if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */ 1088 if (error != 0) { 1089 mtx_unlock(&vnode_free_list_mtx); 1090 return (error); 1091 } 1092#endif 1093 atomic_add_long(&numvnodes, 1); 1094 mtx_unlock(&vnode_free_list_mtx); 1095alloc: 1096 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO); 1097 /* 1098 * Setup locks. 1099 */ 1100 vp->v_vnlock = &vp->v_lock; 1101 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 1102 /* 1103 * By default, don't allow shared locks unless filesystems 1104 * opt-in. 1105 */ 1106 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE | LK_IS_VNODE); 1107 /* 1108 * Initialize bufobj. 1109 */ 1110 bo = &vp->v_bufobj; 1111 bo->__bo_vnode = vp; 1112 rw_init(BO_LOCKPTR(bo), "bufobj interlock"); 1113 bo->bo_ops = &buf_ops_bio; 1114 bo->bo_private = vp; 1115 TAILQ_INIT(&bo->bo_clean.bv_hd); 1116 TAILQ_INIT(&bo->bo_dirty.bv_hd); 1117 /* 1118 * Initialize namecache. 1119 */ 1120 LIST_INIT(&vp->v_cache_src); 1121 TAILQ_INIT(&vp->v_cache_dst); 1122 /* 1123 * Finalize various vnode identity bits. 1124 */ 1125 vp->v_type = VNON; 1126 vp->v_tag = tag; 1127 vp->v_op = vops; 1128 v_incr_usecount(vp); 1129 vp->v_data = NULL; 1130#ifdef MAC 1131 mac_vnode_init(vp); 1132 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1133 mac_vnode_associate_singlelabel(mp, vp); 1134 else if (mp == NULL && vops != &dead_vnodeops) 1135 printf("NULL mp in getnewvnode()\n"); 1136#endif 1137 if (mp != NULL) { 1138 bo->bo_bsize = mp->mnt_stat.f_iosize; 1139 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1140 vp->v_vflag |= VV_NOKNOTE; 1141 } 1142 rangelock_init(&vp->v_rl); 1143 1144 /* 1145 * For the filesystems which do not use vfs_hash_insert(), 1146 * still initialize v_hash to have vfs_hash_index() useful. 1147 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1148 * its own hashing. 1149 */ 1150 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1151 1152 *vpp = vp; 1153 return (0); 1154} 1155 1156/* 1157 * Delete from old mount point vnode list, if on one. 1158 */ 1159static void 1160delmntque(struct vnode *vp) 1161{ 1162 struct mount *mp; 1163 int active; 1164 1165 mp = vp->v_mount; 1166 if (mp == NULL) 1167 return; 1168 MNT_ILOCK(mp); 1169 VI_LOCK(vp); 1170 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1171 ("Active vnode list size %d > Vnode list size %d", 1172 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1173 active = vp->v_iflag & VI_ACTIVE; 1174 vp->v_iflag &= ~VI_ACTIVE; 1175 if (active) { 1176 mtx_lock(&vnode_free_list_mtx); 1177 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1178 mp->mnt_activevnodelistsize--; 1179 mtx_unlock(&vnode_free_list_mtx); 1180 } 1181 vp->v_mount = NULL; 1182 VI_UNLOCK(vp); 1183 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1184 ("bad mount point vnode list size")); 1185 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1186 mp->mnt_nvnodelistsize--; 1187 MNT_REL(mp); 1188 MNT_IUNLOCK(mp); 1189} 1190 1191static void 1192insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1193{ 1194 1195 vp->v_data = NULL; 1196 vp->v_op = &dead_vnodeops; 1197 vgone(vp); 1198 vput(vp); 1199} 1200 1201/* 1202 * Insert into list of vnodes for the new mount point, if available. 
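/*
 * Editor's sketch (not part of the original file): the allocation pattern
 * a filesystem's vget-style routine typically follows with getnewvnode()
 * above and insmntque() below.  The vop vector and helper name here are
 * hypothetical.  Note that when insmntque() fails it has already disposed
 * of the vnode through insmntque_stddtr(), so the caller must not release
 * it again.
 */
extern struct vop_vector example_vnodeops;      /* hypothetical */

static int
example_alloc_vnode(struct mount *mp, void *data, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

        error = getnewvnode("example", mp, &example_vnodeops, &vp);
        if (error != 0)
                return (error);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        vp->v_data = data;
        vp->v_type = VREG;
        error = insmntque(vp, mp);
        if (error != 0) {
                /* vp was vgone()'d and vput() by insmntque_stddtr(). */
                *vpp = NULL;
                return (error);
        }
        *vpp = vp;
        return (0);
}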
1203 */ 1204int 1205insmntque1(struct vnode *vp, struct mount *mp, 1206 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1207{ 1208 1209 KASSERT(vp->v_mount == NULL, 1210 ("insmntque: vnode already on per mount vnode list")); 1211 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1212 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1213 1214 /* 1215 * We acquire the vnode interlock early to ensure that the 1216 * vnode cannot be recycled by another process releasing a 1217 * holdcnt on it before we get it on both the vnode list 1218 * and the active vnode list. The mount mutex protects only 1219 * manipulation of the vnode list and the vnode freelist 1220 * mutex protects only manipulation of the active vnode list. 1221 * Hence the need to hold the vnode interlock throughout. 1222 */ 1223 MNT_ILOCK(mp); 1224 VI_LOCK(vp); 1225 if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 && 1226 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1227 mp->mnt_nvnodelistsize == 0)) && 1228 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1229 VI_UNLOCK(vp); 1230 MNT_IUNLOCK(mp); 1231 if (dtr != NULL) 1232 dtr(vp, dtr_arg); 1233 return (EBUSY); 1234 } 1235 vp->v_mount = mp; 1236 MNT_REF(mp); 1237 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1238 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1239 ("neg mount point vnode list size")); 1240 mp->mnt_nvnodelistsize++; 1241 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1242 ("Activating already active vnode")); 1243 vp->v_iflag |= VI_ACTIVE; 1244 mtx_lock(&vnode_free_list_mtx); 1245 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1246 mp->mnt_activevnodelistsize++; 1247 mtx_unlock(&vnode_free_list_mtx); 1248 VI_UNLOCK(vp); 1249 MNT_IUNLOCK(mp); 1250 return (0); 1251} 1252 1253int 1254insmntque(struct vnode *vp, struct mount *mp) 1255{ 1256 1257 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1258} 1259 1260/* 1261 * Flush out and invalidate all buffers associated with a bufobj 1262 * Called with the underlying object locked. 1263 */ 1264int 1265bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1266{ 1267 int error; 1268 1269 BO_LOCK(bo); 1270 if (flags & V_SAVE) { 1271 error = bufobj_wwait(bo, slpflag, slptimeo); 1272 if (error) { 1273 BO_UNLOCK(bo); 1274 return (error); 1275 } 1276 if (bo->bo_dirty.bv_cnt > 0) { 1277 BO_UNLOCK(bo); 1278 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1279 return (error); 1280 /* 1281 * XXX We could save a lock/unlock if this was only 1282 * enabled under INVARIANTS 1283 */ 1284 BO_LOCK(bo); 1285 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1286 panic("vinvalbuf: dirty bufs"); 1287 } 1288 } 1289 /* 1290 * If you alter this loop please notice that interlock is dropped and 1291 * reacquired in flushbuflist. Special care is needed to ensure that 1292 * no race conditions occur from this. 1293 */ 1294 do { 1295 error = flushbuflist(&bo->bo_clean, 1296 flags, bo, slpflag, slptimeo); 1297 if (error == 0 && !(flags & V_CLEANONLY)) 1298 error = flushbuflist(&bo->bo_dirty, 1299 flags, bo, slpflag, slptimeo); 1300 if (error != 0 && error != EAGAIN) { 1301 BO_UNLOCK(bo); 1302 return (error); 1303 } 1304 } while (error != 0); 1305 1306 /* 1307 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1308 * have write I/O in-progress but if there is a VM object then the 1309 * VM object can also have read-I/O in-progress. 
1310 */ 1311 do { 1312 bufobj_wwait(bo, 0, 0); 1313 BO_UNLOCK(bo); 1314 if (bo->bo_object != NULL) { 1315 VM_OBJECT_WLOCK(bo->bo_object); 1316 vm_object_pip_wait(bo->bo_object, "bovlbx"); 1317 VM_OBJECT_WUNLOCK(bo->bo_object); 1318 } 1319 BO_LOCK(bo); 1320 } while (bo->bo_numoutput > 0); 1321 BO_UNLOCK(bo); 1322 1323 /* 1324 * Destroy the copy in the VM cache, too. 1325 */ 1326 if (bo->bo_object != NULL && 1327 (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) { 1328 VM_OBJECT_WLOCK(bo->bo_object); 1329 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1330 OBJPR_CLEANONLY : 0); 1331 VM_OBJECT_WUNLOCK(bo->bo_object); 1332 } 1333 1334#ifdef INVARIANTS 1335 BO_LOCK(bo); 1336 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 && 1337 (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) 1338 panic("vinvalbuf: flush failed"); 1339 BO_UNLOCK(bo); 1340#endif 1341 return (0); 1342} 1343 1344/* 1345 * Flush out and invalidate all buffers associated with a vnode. 1346 * Called with the underlying object locked. 1347 */ 1348int 1349vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1350{ 1351 1352 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1353 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1354 if (vp->v_object != NULL && vp->v_object->handle != vp) 1355 return (0); 1356 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1357} 1358 1359/* 1360 * Flush out buffers on the specified list. 1361 * 1362 */ 1363static int 1364flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1365 int slptimeo) 1366{ 1367 struct buf *bp, *nbp; 1368 int retval, error; 1369 daddr_t lblkno; 1370 b_xflags_t xflags; 1371 1372 ASSERT_BO_WLOCKED(bo); 1373 1374 retval = 0; 1375 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1376 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) || 1377 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) { 1378 continue; 1379 } 1380 lblkno = 0; 1381 xflags = 0; 1382 if (nbp != NULL) { 1383 lblkno = nbp->b_lblkno; 1384 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1385 } 1386 retval = EAGAIN; 1387 error = BUF_TIMELOCK(bp, 1388 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1389 "flushbuf", slpflag, slptimeo); 1390 if (error) { 1391 BO_LOCK(bo); 1392 return (error != ENOLCK ? error : EAGAIN); 1393 } 1394 KASSERT(bp->b_bufobj == bo, 1395 ("bp %p wrong b_bufobj %p should be %p", 1396 bp, bp->b_bufobj, bo)); 1397 if (bp->b_bufobj != bo) { /* XXX: necessary ? */ 1398 BUF_UNLOCK(bp); 1399 BO_LOCK(bo); 1400 return (EAGAIN); 1401 } 1402 /* 1403 * XXX Since there are no node locks for NFS, I 1404 * believe there is a slight chance that a delayed 1405 * write will occur while sleeping just above, so 1406 * check for it. 1407 */ 1408 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1409 (flags & V_SAVE)) { 1410 bremfree(bp); 1411 bp->b_flags |= B_ASYNC; 1412 bwrite(bp); 1413 BO_LOCK(bo); 1414 return (EAGAIN); /* XXX: why not loop ? */ 1415 } 1416 bremfree(bp); 1417 bp->b_flags |= (B_INVAL | B_RELBUF); 1418 bp->b_flags &= ~B_ASYNC; 1419 brelse(bp); 1420 BO_LOCK(bo); 1421 if (nbp != NULL && 1422 (nbp->b_bufobj != bo || 1423 nbp->b_lblkno != lblkno || 1424 (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) != xflags)) 1425 break; /* nbp invalid */ 1426 } 1427 return (retval); 1428} 1429 1430/* 1431 * Truncate a file's buffer and pages to a specified length. This 1432 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1433 * sync activity. 
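/*
 * Editor's sketch (not part of the original file): how a filesystem's
 * truncation path might use vtruncbuf() below.  The helper name is
 * hypothetical, and real callers also update on-disk block pointers,
 * which is omitted here.
 */
static int
example_truncate(struct vnode *vp, struct ucred *cred, off_t length,
    int blksize)
{
        int error;

        ASSERT_VOP_ELOCKED(vp, "example_truncate");
        /* Discard (or write back) buffers entirely past the new end. */
        error = vtruncbuf(vp, cred, length, blksize);
        if (error != 0)
                return (error);
        /* vtruncbuf() also propagates the new size to the VM object. */
        return (0);
}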
1434 */ 1435int 1436vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) 1437{ 1438 struct buf *bp, *nbp; 1439 int anyfreed; 1440 int trunclbn; 1441 struct bufobj *bo; 1442 1443 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, 1444 vp, cred, blksize, (uintmax_t)length); 1445 1446 /* 1447 * Round up to the *next* lbn. 1448 */ 1449 trunclbn = (length + blksize - 1) / blksize; 1450 1451 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1452restart: 1453 bo = &vp->v_bufobj; 1454 BO_LOCK(bo); 1455 anyfreed = 1; 1456 for (;anyfreed;) { 1457 anyfreed = 0; 1458 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1459 if (bp->b_lblkno < trunclbn) 1460 continue; 1461 if (BUF_LOCK(bp, 1462 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1463 BO_LOCKPTR(bo)) == ENOLCK) 1464 goto restart; 1465 1466 bremfree(bp); 1467 bp->b_flags |= (B_INVAL | B_RELBUF); 1468 bp->b_flags &= ~B_ASYNC; 1469 brelse(bp); 1470 anyfreed = 1; 1471 1472 BO_LOCK(bo); 1473 if (nbp != NULL && 1474 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1475 (nbp->b_vp != vp) || 1476 (nbp->b_flags & B_DELWRI))) { 1477 BO_UNLOCK(bo); 1478 goto restart; 1479 } 1480 } 1481 1482 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1483 if (bp->b_lblkno < trunclbn) 1484 continue; 1485 if (BUF_LOCK(bp, 1486 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1487 BO_LOCKPTR(bo)) == ENOLCK) 1488 goto restart; 1489 bremfree(bp); 1490 bp->b_flags |= (B_INVAL | B_RELBUF); 1491 bp->b_flags &= ~B_ASYNC; 1492 brelse(bp); 1493 anyfreed = 1; 1494 1495 BO_LOCK(bo); 1496 if (nbp != NULL && 1497 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1498 (nbp->b_vp != vp) || 1499 (nbp->b_flags & B_DELWRI) == 0)) { 1500 BO_UNLOCK(bo); 1501 goto restart; 1502 } 1503 } 1504 } 1505 1506 if (length > 0) { 1507restartsync: 1508 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1509 if (bp->b_lblkno > 0) 1510 continue; 1511 /* 1512 * Since we hold the vnode lock this should only 1513 * fail if we're racing with the buf daemon. 1514 */ 1515 if (BUF_LOCK(bp, 1516 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1517 BO_LOCKPTR(bo)) == ENOLCK) { 1518 goto restart; 1519 } 1520 VNASSERT((bp->b_flags & B_DELWRI), vp, 1521 ("buf(%p) on dirty queue without DELWRI", bp)); 1522 1523 bremfree(bp); 1524 bawrite(bp); 1525 BO_LOCK(bo); 1526 goto restartsync; 1527 } 1528 } 1529 1530 bufobj_wwait(bo, 0, 0); 1531 BO_UNLOCK(bo); 1532 vnode_pager_setsize(vp, length); 1533 1534 return (0); 1535} 1536 1537static void 1538buf_vlist_remove(struct buf *bp) 1539{ 1540 struct bufv *bv; 1541 1542 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1543 ASSERT_BO_WLOCKED(bp->b_bufobj); 1544 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1545 (BX_VNDIRTY|BX_VNCLEAN), 1546 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1547 if (bp->b_xflags & BX_VNDIRTY) 1548 bv = &bp->b_bufobj->bo_dirty; 1549 else 1550 bv = &bp->b_bufobj->bo_clean; 1551 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 1552 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1553 bv->bv_cnt--; 1554 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1555} 1556 1557/* 1558 * Add the buffer to the sorted clean or dirty block list. 1559 * 1560 * NOTE: xflags is passed as a constant, optimizing this inline function! 
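/*
 * Editor's illustrative sketch (not part of the original file): the
 * insertion policy buf_vlist_add() below uses, modeled with a plain
 * sorted tail queue instead of the pctrie.  The tail is checked first
 * because files usually grow at the end, so most insertions are appends;
 * otherwise the predecessor (the "lookup_le" case) is located and the
 * new entry is inserted after it.  The toy_* names are hypothetical.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_buf {
        long                    lblkno;
        TAILQ_ENTRY(toy_buf)    link;
};
TAILQ_HEAD(toy_bufv, toy_buf);

static void
toy_vlist_add(struct toy_bufv *bv, struct toy_buf *bp)
{
        struct toy_buf *n;

        /* Fast path: empty list or strictly growing at the tail. */
        if (TAILQ_EMPTY(bv) ||
            bp->lblkno > TAILQ_LAST(bv, toy_bufv)->lblkno) {
                TAILQ_INSERT_TAIL(bv, bp, link);
                return;
        }
        /* Slow path: find the last entry <= lblkno and insert after it. */
        TAILQ_FOREACH_REVERSE(n, bv, toy_bufv, link)
                if (n->lblkno <= bp->lblkno)
                        break;
        if (n == NULL)
                TAILQ_INSERT_HEAD(bv, bp, link);
        else
                TAILQ_INSERT_AFTER(bv, n, bp, link);
}

int
main(void)
{
        struct toy_bufv bv = TAILQ_HEAD_INITIALIZER(bv);
        long blocks[] = { 1, 4, 2, 0, 3 };
        struct toy_buf *bp;
        size_t i;

        for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
                bp = calloc(1, sizeof(*bp));
                bp->lblkno = blocks[i];
                toy_vlist_add(&bv, bp);
        }
        TAILQ_FOREACH(bp, &bv, link)
                printf("%ld ", bp->lblkno);
        printf("\n");
        return (0);
}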
1561 */ 1562static void 1563buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1564{ 1565 struct bufv *bv; 1566 struct buf *n; 1567 int error; 1568 1569 ASSERT_BO_WLOCKED(bo); 1570 KASSERT((bo->bo_flag & BO_DEAD) == 0, ("dead bo %p", bo)); 1571 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 1572 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 1573 bp->b_xflags |= xflags; 1574 if (xflags & BX_VNDIRTY) 1575 bv = &bo->bo_dirty; 1576 else 1577 bv = &bo->bo_clean; 1578 1579 /* 1580 * Keep the list ordered. Optimize empty list insertion. Assume 1581 * we tend to grow at the tail so lookup_le should usually be cheaper 1582 * than _ge. 1583 */ 1584 if (bv->bv_cnt == 0 || 1585 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 1586 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 1587 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 1588 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 1589 else 1590 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 1591 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 1592 if (error) 1593 panic("buf_vlist_add: Preallocated nodes insufficient."); 1594 bv->bv_cnt++; 1595} 1596 1597/* 1598 * Lookup a buffer using the splay tree. Note that we specifically avoid 1599 * shadow buffers used in background bitmap writes. 1600 * 1601 * This code isn't quite efficient as it could be because we are maintaining 1602 * two sorted lists and do not know which list the block resides in. 1603 * 1604 * During a "make buildworld" the desired buffer is found at one of 1605 * the roots more than 60% of the time. Thus, checking both roots 1606 * before performing either splay eliminates unnecessary splays on the 1607 * first tree splayed. 1608 */ 1609struct buf * 1610gbincore(struct bufobj *bo, daddr_t lblkno) 1611{ 1612 struct buf *bp; 1613 1614 ASSERT_BO_LOCKED(bo); 1615 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 1616 if (bp != NULL) 1617 return (bp); 1618 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 1619} 1620 1621/* 1622 * Associate a buffer with a vnode. 1623 */ 1624void 1625bgetvp(struct vnode *vp, struct buf *bp) 1626{ 1627 struct bufobj *bo; 1628 1629 bo = &vp->v_bufobj; 1630 ASSERT_BO_WLOCKED(bo); 1631 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 1632 1633 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 1634 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 1635 ("bgetvp: bp already attached! %p", bp)); 1636 1637 vhold(vp); 1638 bp->b_vp = vp; 1639 bp->b_bufobj = bo; 1640 /* 1641 * Insert onto list for new vnode. 1642 */ 1643 buf_vlist_add(bp, bo, BX_VNCLEAN); 1644} 1645 1646/* 1647 * Disassociate a buffer from a vnode. 1648 */ 1649void 1650brelvp(struct buf *bp) 1651{ 1652 struct bufobj *bo; 1653 struct vnode *vp; 1654 1655 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1656 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1657 1658 /* 1659 * Delete from old vnode list, if on one. 
1660 */ 1661 vp = bp->b_vp; /* XXX */ 1662 bo = bp->b_bufobj; 1663 BO_LOCK(bo); 1664 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1665 buf_vlist_remove(bp); 1666 else 1667 panic("brelvp: Buffer %p not on queue.", bp); 1668 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 1669 bo->bo_flag &= ~BO_ONWORKLST; 1670 mtx_lock(&sync_mtx); 1671 LIST_REMOVE(bo, bo_synclist); 1672 syncer_worklist_len--; 1673 mtx_unlock(&sync_mtx); 1674 } 1675 bp->b_vp = NULL; 1676 bp->b_bufobj = NULL; 1677 BO_UNLOCK(bo); 1678 vdrop(vp); 1679} 1680 1681/* 1682 * Add an item to the syncer work queue. 1683 */ 1684static void 1685vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 1686{ 1687 int slot; 1688 1689 ASSERT_BO_WLOCKED(bo); 1690 1691 mtx_lock(&sync_mtx); 1692 if (bo->bo_flag & BO_ONWORKLST) 1693 LIST_REMOVE(bo, bo_synclist); 1694 else { 1695 bo->bo_flag |= BO_ONWORKLST; 1696 syncer_worklist_len++; 1697 } 1698 1699 if (delay > syncer_maxdelay - 2) 1700 delay = syncer_maxdelay - 2; 1701 slot = (syncer_delayno + delay) & syncer_mask; 1702 1703 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 1704 mtx_unlock(&sync_mtx); 1705} 1706 1707static int 1708sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 1709{ 1710 int error, len; 1711 1712 mtx_lock(&sync_mtx); 1713 len = syncer_worklist_len - sync_vnode_count; 1714 mtx_unlock(&sync_mtx); 1715 error = SYSCTL_OUT(req, &len, sizeof(len)); 1716 return (error); 1717} 1718 1719SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 1720 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 1721 1722static struct proc *updateproc; 1723static void sched_sync(void); 1724static struct kproc_desc up_kp = { 1725 "syncer", 1726 sched_sync, 1727 &updateproc 1728}; 1729SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 1730 1731static int 1732sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 1733{ 1734 struct vnode *vp; 1735 struct mount *mp; 1736 1737 *bo = LIST_FIRST(slp); 1738 if (*bo == NULL) 1739 return (0); 1740 vp = (*bo)->__bo_vnode; /* XXX */ 1741 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 1742 return (1); 1743 /* 1744 * We use vhold in case the vnode does not 1745 * successfully sync. vhold prevents the vnode from 1746 * going away when we unlock the sync_mtx so that 1747 * we can acquire the vnode interlock. 1748 */ 1749 vholdl(vp); 1750 mtx_unlock(&sync_mtx); 1751 VI_UNLOCK(vp); 1752 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1753 vdrop(vp); 1754 mtx_lock(&sync_mtx); 1755 return (*bo == LIST_FIRST(slp)); 1756 } 1757 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1758 (void) VOP_FSYNC(vp, MNT_LAZY, td); 1759 VOP_UNLOCK(vp, 0); 1760 vn_finished_write(mp); 1761 BO_LOCK(*bo); 1762 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 1763 /* 1764 * Put us back on the worklist. The worklist 1765 * routine will remove us from our current 1766 * position and then add us back in at a later 1767 * position. 1768 */ 1769 vn_syncer_add_to_worklist(*bo, syncdelay); 1770 } 1771 BO_UNLOCK(*bo); 1772 vdrop(vp); 1773 mtx_lock(&sync_mtx); 1774 return (0); 1775} 1776 1777static int first_printf = 1; 1778 1779/* 1780 * System filesystem synchronizer daemon. 
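/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * user-space model of the syncer "delay wheel" used by
 * vn_syncer_add_to_worklist() above and drained by the syncer daemon
 * below.  The table size is a power of two, so an entry that should be
 * flushed "delay" seconds from now is placed that many slots ahead of the
 * slot currently being processed, wrapping with the mask.  The toy_*
 * names are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define TOY_MAXDELAY    32                      /* like SYNCER_MAXDELAY */
#define TOY_MASK        (TOY_MAXDELAY - 1)      /* like syncer_mask */

static int toy_delayno;                         /* like syncer_delayno */

/* Pick the bucket a request with the given delay (in seconds) lands in. */
static int
toy_slot_for_delay(int delay)
{
        if (delay > TOY_MAXDELAY - 2)           /* clamp, as the syncer does */
                delay = TOY_MAXDELAY - 2;
        return ((toy_delayno + delay) & TOY_MASK);
}

int
main(void)
{
        toy_delayno = 30;
        /* A 15 second delay wraps around the end of the table. */
        assert(toy_slot_for_delay(15) == ((30 + 15) & TOY_MASK));
        printf("slot for 15s delay: %d\n", toy_slot_for_delay(15));
        /* One bucket is processed per second, round-robin. */
        toy_delayno = (toy_delayno + 1) & TOY_MASK;
        return (0);
}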
1781 */ 1782static void 1783sched_sync(void) 1784{ 1785 struct synclist *next, *slp; 1786 struct bufobj *bo; 1787 long starttime; 1788 struct thread *td = curthread; 1789 int last_work_seen; 1790 int net_worklist_len; 1791 int syncer_final_iter; 1792 int error; 1793 1794 last_work_seen = 0; 1795 syncer_final_iter = 0; 1796 syncer_state = SYNCER_RUNNING; 1797 starttime = time_uptime; 1798 td->td_pflags |= TDP_NORUNNINGBUF; 1799 1800 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 1801 SHUTDOWN_PRI_LAST); 1802 1803 mtx_lock(&sync_mtx); 1804 for (;;) { 1805 if (syncer_state == SYNCER_FINAL_DELAY && 1806 syncer_final_iter == 0) { 1807 mtx_unlock(&sync_mtx); 1808 kproc_suspend_check(td->td_proc); 1809 mtx_lock(&sync_mtx); 1810 } 1811 net_worklist_len = syncer_worklist_len - sync_vnode_count; 1812 if (syncer_state != SYNCER_RUNNING && 1813 starttime != time_uptime) { 1814 if (first_printf) { 1815 printf("\nSyncing disks, vnodes remaining..."); 1816 first_printf = 0; 1817 } 1818 printf("%d ", net_worklist_len); 1819 } 1820 starttime = time_uptime; 1821 1822 /* 1823 * Push files whose dirty time has expired. Be careful 1824 * of interrupt race on slp queue. 1825 * 1826 * Skip over empty worklist slots when shutting down. 1827 */ 1828 do { 1829 slp = &syncer_workitem_pending[syncer_delayno]; 1830 syncer_delayno += 1; 1831 if (syncer_delayno == syncer_maxdelay) 1832 syncer_delayno = 0; 1833 next = &syncer_workitem_pending[syncer_delayno]; 1834 /* 1835 * If the worklist has wrapped since the 1836 * it was emptied of all but syncer vnodes, 1837 * switch to the FINAL_DELAY state and run 1838 * for one more second. 1839 */ 1840 if (syncer_state == SYNCER_SHUTTING_DOWN && 1841 net_worklist_len == 0 && 1842 last_work_seen == syncer_delayno) { 1843 syncer_state = SYNCER_FINAL_DELAY; 1844 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 1845 } 1846 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 1847 syncer_worklist_len > 0); 1848 1849 /* 1850 * Keep track of the last time there was anything 1851 * on the worklist other than syncer vnodes. 1852 * Return to the SHUTTING_DOWN state if any 1853 * new work appears. 1854 */ 1855 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 1856 last_work_seen = syncer_delayno; 1857 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 1858 syncer_state = SYNCER_SHUTTING_DOWN; 1859 while (!LIST_EMPTY(slp)) { 1860 error = sync_vnode(slp, &bo, td); 1861 if (error == 1) { 1862 LIST_REMOVE(bo, bo_synclist); 1863 LIST_INSERT_HEAD(next, bo, bo_synclist); 1864 continue; 1865 } 1866 1867 if (first_printf == 0) 1868 wdog_kern_pat(WD_LASTVAL); 1869 1870 } 1871 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 1872 syncer_final_iter--; 1873 /* 1874 * The variable rushjob allows the kernel to speed up the 1875 * processing of the filesystem syncer process. A rushjob 1876 * value of N tells the filesystem syncer to process the next 1877 * N seconds worth of work on its queue ASAP. Currently rushjob 1878 * is used by the soft update code to speed up the filesystem 1879 * syncer process when the incore state is getting so far 1880 * ahead of the disk that the kernel memory pool is being 1881 * threatened with exhaustion. 1882 */ 1883 if (rushjob > 0) { 1884 rushjob -= 1; 1885 continue; 1886 } 1887 /* 1888 * Just sleep for a short period of time between 1889 * iterations when shutting down to allow some I/O 1890 * to happen. 1891 * 1892 * If it has taken us less than a second to process the 1893 * current work, then wait. 
Otherwise start right over 1894 * again. We can still lose time if any single round 1895 * takes more than two seconds, but it does not really 1896 * matter as we are just trying to generally pace the 1897 * filesystem activity. 1898 */ 1899 if (syncer_state != SYNCER_RUNNING || 1900 time_uptime == starttime) { 1901 thread_lock(td); 1902 sched_prio(td, PPAUSE); 1903 thread_unlock(td); 1904 } 1905 if (syncer_state != SYNCER_RUNNING) 1906 cv_timedwait(&sync_wakeup, &sync_mtx, 1907 hz / SYNCER_SHUTDOWN_SPEEDUP); 1908 else if (time_uptime == starttime) 1909 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 1910 } 1911} 1912 1913/* 1914 * Request the syncer daemon to speed up its work. 1915 * We never push it to speed up more than half of its 1916 * normal turn time, otherwise it could take over the cpu. 1917 */ 1918int 1919speedup_syncer(void) 1920{ 1921 int ret = 0; 1922 1923 mtx_lock(&sync_mtx); 1924 if (rushjob < syncdelay / 2) { 1925 rushjob += 1; 1926 stat_rush_requests += 1; 1927 ret = 1; 1928 } 1929 mtx_unlock(&sync_mtx); 1930 cv_broadcast(&sync_wakeup); 1931 return (ret); 1932} 1933 1934/* 1935 * Tell the syncer to speed up its work and run though its work 1936 * list several times, then tell it to shut down. 1937 */ 1938static void 1939syncer_shutdown(void *arg, int howto) 1940{ 1941 1942 if (howto & RB_NOSYNC) 1943 return; 1944 mtx_lock(&sync_mtx); 1945 syncer_state = SYNCER_SHUTTING_DOWN; 1946 rushjob = 0; 1947 mtx_unlock(&sync_mtx); 1948 cv_broadcast(&sync_wakeup); 1949 kproc_shutdown(arg, howto); 1950} 1951 1952void 1953syncer_suspend(void) 1954{ 1955 1956 syncer_shutdown(updateproc, 0); 1957} 1958 1959void 1960syncer_resume(void) 1961{ 1962 1963 mtx_lock(&sync_mtx); 1964 first_printf = 1; 1965 syncer_state = SYNCER_RUNNING; 1966 mtx_unlock(&sync_mtx); 1967 cv_broadcast(&sync_wakeup); 1968 kproc_resume(updateproc); 1969} 1970 1971/* 1972 * Reassign a buffer from one vnode to another. 1973 * Used to assign file specific control information 1974 * (indirect blocks) to the vnode to which they belong. 1975 */ 1976void 1977reassignbuf(struct buf *bp) 1978{ 1979 struct vnode *vp; 1980 struct bufobj *bo; 1981 int delay; 1982#ifdef INVARIANTS 1983 struct bufv *bv; 1984#endif 1985 1986 vp = bp->b_vp; 1987 bo = bp->b_bufobj; 1988 ++reassignbufcalls; 1989 1990 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 1991 bp, bp->b_vp, bp->b_flags); 1992 /* 1993 * B_PAGING flagged buffers cannot be reassigned because their vp 1994 * is not fully linked in. 1995 */ 1996 if (bp->b_flags & B_PAGING) 1997 panic("cannot reassign paging buffer"); 1998 1999 /* 2000 * Delete from old vnode list, if on one. 2001 */ 2002 BO_LOCK(bo); 2003 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2004 buf_vlist_remove(bp); 2005 else 2006 panic("reassignbuf: Buffer %p not on queue.", bp); 2007 /* 2008 * If dirty, put on list of dirty buffers; otherwise insert onto list 2009 * of clean buffers. 
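 * A buffer carrying B_DELWRI still has data to be written, so it goes on
 * bo_dirty and the bufobj is queued to the syncer with a type-dependent
 * delay (dirdelay for directories, metadelay for VCHR vnodes, filedelay
 * otherwise); a clean buffer goes on bo_clean, and a bufobj whose last
 * dirty buffer just moved away is taken off the worklist.  The typical
 * caller has just toggled B_DELWRI, roughly (sketch; see bdirty() and
 * bundirty() in vfs_bio.c):
 *
 *	bp->b_flags |= B_DELWRI;
 *	reassignbuf(bp);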
2010 */ 2011 if (bp->b_flags & B_DELWRI) { 2012 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2013 switch (vp->v_type) { 2014 case VDIR: 2015 delay = dirdelay; 2016 break; 2017 case VCHR: 2018 delay = metadelay; 2019 break; 2020 default: 2021 delay = filedelay; 2022 } 2023 vn_syncer_add_to_worklist(bo, delay); 2024 } 2025 buf_vlist_add(bp, bo, BX_VNDIRTY); 2026 } else { 2027 buf_vlist_add(bp, bo, BX_VNCLEAN); 2028 2029 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2030 mtx_lock(&sync_mtx); 2031 LIST_REMOVE(bo, bo_synclist); 2032 syncer_worklist_len--; 2033 mtx_unlock(&sync_mtx); 2034 bo->bo_flag &= ~BO_ONWORKLST; 2035 } 2036 } 2037#ifdef INVARIANTS 2038 bv = &bo->bo_clean; 2039 bp = TAILQ_FIRST(&bv->bv_hd); 2040 KASSERT(bp == NULL || bp->b_bufobj == bo, 2041 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2042 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2043 KASSERT(bp == NULL || bp->b_bufobj == bo, 2044 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2045 bv = &bo->bo_dirty; 2046 bp = TAILQ_FIRST(&bv->bv_hd); 2047 KASSERT(bp == NULL || bp->b_bufobj == bo, 2048 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2049 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2050 KASSERT(bp == NULL || bp->b_bufobj == bo, 2051 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2052#endif 2053 BO_UNLOCK(bo); 2054} 2055 2056/* 2057 * Increment the use and hold counts on the vnode, taking care to reference 2058 * the driver's usecount if this is a chardev. The vholdl() will remove 2059 * the vnode from the free list if it is presently free. Requires the 2060 * vnode interlock and returns with it held. 2061 */ 2062static void 2063v_incr_usecount(struct vnode *vp) 2064{ 2065 2066 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2067 vholdl(vp); 2068 vp->v_usecount++; 2069 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2070 dev_lock(); 2071 vp->v_rdev->si_usecount++; 2072 dev_unlock(); 2073 } 2074} 2075 2076/* 2077 * Turn a holdcnt into a use+holdcnt such that only one call to 2078 * v_decr_usecount is needed. 2079 */ 2080static void 2081v_upgrade_usecount(struct vnode *vp) 2082{ 2083 2084 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2085 vp->v_usecount++; 2086 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2087 dev_lock(); 2088 vp->v_rdev->si_usecount++; 2089 dev_unlock(); 2090 } 2091} 2092 2093/* 2094 * Decrement the vnode use and hold count along with the driver's usecount 2095 * if this is a chardev. The vdropl() below releases the vnode interlock 2096 * as it may free the vnode. 2097 */ 2098static void 2099v_decr_usecount(struct vnode *vp) 2100{ 2101 2102 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2103 VNASSERT(vp->v_usecount > 0, vp, 2104 ("v_decr_usecount: negative usecount")); 2105 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2106 vp->v_usecount--; 2107 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2108 dev_lock(); 2109 vp->v_rdev->si_usecount--; 2110 dev_unlock(); 2111 } 2112 vdropl(vp); 2113} 2114 2115/* 2116 * Decrement only the use count and driver use count. This is intended to 2117 * be paired with a follow on vdropl() to release the remaining hold count. 2118 * In this way we may vgone() a vnode with a 0 usecount without risk of 2119 * having it end up on a free list because the hold count is kept above 0. 
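 *
 * A sketch of the intended pairing, as used by vputx() below:
 *
 *	v_decr_useonly(vp);	usecount may reach 0, hold count remains
 *	...lock the vnode and let vinactive() run...
 *	vdropl(vp);		release the remaining hold count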
2120 */ 2121static void 2122v_decr_useonly(struct vnode *vp) 2123{ 2124 2125 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2126 VNASSERT(vp->v_usecount > 0, vp, 2127 ("v_decr_useonly: negative usecount")); 2128 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2129 vp->v_usecount--; 2130 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2131 dev_lock(); 2132 vp->v_rdev->si_usecount--; 2133 dev_unlock(); 2134 } 2135} 2136 2137/* 2138 * Grab a particular vnode from the free list, increment its 2139 * reference count and lock it. VI_DOOMED is set if the vnode 2140 * is being destroyed. Only callers who specify LK_RETRY will 2141 * see doomed vnodes. If inactive processing was delayed in 2142 * vput try to do it here. 2143 */ 2144int 2145vget(struct vnode *vp, int flags, struct thread *td) 2146{ 2147 int error; 2148 2149 error = 0; 2150 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2151 ("vget: invalid lock operation")); 2152 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2153 2154 if ((flags & LK_INTERLOCK) == 0) 2155 VI_LOCK(vp); 2156 vholdl(vp); 2157 if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) { 2158 vdrop(vp); 2159 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2160 vp); 2161 return (error); 2162 } 2163 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2164 panic("vget: vn_lock failed to return ENOENT\n"); 2165 VI_LOCK(vp); 2166 /* Upgrade our holdcnt to a usecount. */ 2167 v_upgrade_usecount(vp); 2168 /* 2169 * We don't guarantee that any particular close will 2170 * trigger inactive processing so just make a best effort 2171 * here at preventing a reference to a removed file. If 2172 * we don't succeed no harm is done. 2173 */ 2174 if (vp->v_iflag & VI_OWEINACT) { 2175 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2176 (flags & LK_NOWAIT) == 0) 2177 vinactive(vp, td); 2178 vp->v_iflag &= ~VI_OWEINACT; 2179 } 2180 VI_UNLOCK(vp); 2181 return (0); 2182} 2183 2184/* 2185 * Increase the reference count of a vnode. 2186 */ 2187void 2188vref(struct vnode *vp) 2189{ 2190 2191 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2192 VI_LOCK(vp); 2193 v_incr_usecount(vp); 2194 VI_UNLOCK(vp); 2195} 2196 2197/* 2198 * Return reference count of a vnode. 2199 * 2200 * The results of this call are only guaranteed when some mechanism other 2201 * than the VI lock is used to stop other processes from gaining references 2202 * to the vnode. This may be the case if the caller holds the only reference. 2203 * This is also useful when stale data is acceptable as race conditions may 2204 * be accounted for by some other means. 2205 */ 2206int 2207vrefcnt(struct vnode *vp) 2208{ 2209 int usecnt; 2210 2211 VI_LOCK(vp); 2212 usecnt = vp->v_usecount; 2213 VI_UNLOCK(vp); 2214 2215 return (usecnt); 2216} 2217 2218#define VPUTX_VRELE 1 2219#define VPUTX_VPUT 2 2220#define VPUTX_VUNREF 3 2221 2222static void 2223vputx(struct vnode *vp, int func) 2224{ 2225 int error; 2226 2227 KASSERT(vp != NULL, ("vputx: null vp")); 2228 if (func == VPUTX_VUNREF) 2229 ASSERT_VOP_LOCKED(vp, "vunref"); 2230 else if (func == VPUTX_VPUT) 2231 ASSERT_VOP_LOCKED(vp, "vput"); 2232 else 2233 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2234 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2235 VI_LOCK(vp); 2236 2237 /* Skip this v_writecount check if we're going to panic below. 
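	 * (a vnode whose v_usecount is already below 1 satisfies the
	 * assertion trivially and instead trips the explicit reference
	 * count panic further down in this function)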
*/ 2238 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2239 ("vputx: missed vn_close")); 2240 error = 0; 2241 2242 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2243 vp->v_usecount == 1)) { 2244 if (func == VPUTX_VPUT) 2245 VOP_UNLOCK(vp, 0); 2246 v_decr_usecount(vp); 2247 return; 2248 } 2249 2250 if (vp->v_usecount != 1) { 2251 vprint("vputx: negative ref count", vp); 2252 panic("vputx: negative ref cnt"); 2253 } 2254 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2255 /* 2256 * We want to hold the vnode until the inactive finishes to 2257 * prevent vgone() races. We drop the use count here and the 2258 * hold count below when we're done. 2259 */ 2260 v_decr_useonly(vp); 2261 /* 2262 * We must call VOP_INACTIVE with the node locked. Mark 2263 * as VI_DOINGINACT to avoid recursion. 2264 */ 2265 vp->v_iflag |= VI_OWEINACT; 2266 switch (func) { 2267 case VPUTX_VRELE: 2268 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2269 VI_LOCK(vp); 2270 break; 2271 case VPUTX_VPUT: 2272 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2273 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2274 LK_NOWAIT); 2275 VI_LOCK(vp); 2276 } 2277 break; 2278 case VPUTX_VUNREF: 2279 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2280 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2281 VI_LOCK(vp); 2282 } 2283 break; 2284 } 2285 if (vp->v_usecount > 0) 2286 vp->v_iflag &= ~VI_OWEINACT; 2287 if (error == 0) { 2288 if (vp->v_iflag & VI_OWEINACT) 2289 vinactive(vp, curthread); 2290 if (func != VPUTX_VUNREF) 2291 VOP_UNLOCK(vp, 0); 2292 } 2293 vdropl(vp); 2294} 2295 2296/* 2297 * Vnode put/release. 2298 * If count drops to zero, call inactive routine and return to freelist. 2299 */ 2300void 2301vrele(struct vnode *vp) 2302{ 2303 2304 vputx(vp, VPUTX_VRELE); 2305} 2306 2307/* 2308 * Release an already locked vnode. This give the same effects as 2309 * unlock+vrele(), but takes less time and avoids releasing and 2310 * re-aquiring the lock (as vrele() acquires the lock internally.) 2311 */ 2312void 2313vput(struct vnode *vp) 2314{ 2315 2316 vputx(vp, VPUTX_VPUT); 2317} 2318 2319/* 2320 * Release an exclusively locked vnode. Do not unlock the vnode lock. 2321 */ 2322void 2323vunref(struct vnode *vp) 2324{ 2325 2326 vputx(vp, VPUTX_VUNREF); 2327} 2328 2329/* 2330 * Somebody doesn't want the vnode recycled. 2331 */ 2332void 2333vhold(struct vnode *vp) 2334{ 2335 2336 VI_LOCK(vp); 2337 vholdl(vp); 2338 VI_UNLOCK(vp); 2339} 2340 2341/* 2342 * Increase the hold count and activate if this is the first reference. 2343 */ 2344void 2345vholdl(struct vnode *vp) 2346{ 2347 struct mount *mp; 2348 2349 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2350#ifdef INVARIANTS 2351 /* getnewvnode() calls v_incr_usecount() without holding interlock. */ 2352 if (vp->v_type != VNON || vp->v_data != NULL) { 2353 ASSERT_VI_LOCKED(vp, "vholdl"); 2354 VNASSERT(vp->v_holdcnt > 0 || (vp->v_iflag & VI_FREE) != 0, 2355 vp, ("vholdl: free vnode is held")); 2356 } 2357#endif 2358 vp->v_holdcnt++; 2359 if ((vp->v_iflag & VI_FREE) == 0) 2360 return; 2361 VNASSERT(vp->v_holdcnt == 1, vp, ("vholdl: wrong hold count")); 2362 VNASSERT(vp->v_op != NULL, vp, ("vholdl: vnode already reclaimed.")); 2363 /* 2364 * Remove a vnode from the free list, mark it as in use, 2365 * and put it on the active list. 
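 *
 * Callers that merely need to keep the vnode from being recycled while
 * they drop other locks follow the usual pattern (sketch; sync_vnode()
 * above is a real instance):
 *
 *	VI_LOCK(vp);
 *	vholdl(vp);
 *	VI_UNLOCK(vp);
 *	...work during which the vnode must not go away...
 *	vdrop(vp);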
2366 */ 2367 mtx_lock(&vnode_free_list_mtx); 2368 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2369 freevnodes--; 2370 vp->v_iflag &= ~(VI_FREE|VI_AGE); 2371 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2372 ("Activating already active vnode")); 2373 vp->v_iflag |= VI_ACTIVE; 2374 mp = vp->v_mount; 2375 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2376 mp->mnt_activevnodelistsize++; 2377 mtx_unlock(&vnode_free_list_mtx); 2378} 2379 2380/* 2381 * Note that there is one less who cares about this vnode. 2382 * vdrop() is the opposite of vhold(). 2383 */ 2384void 2385vdrop(struct vnode *vp) 2386{ 2387 2388 VI_LOCK(vp); 2389 vdropl(vp); 2390} 2391 2392/* 2393 * Drop the hold count of the vnode. If this is the last reference to 2394 * the vnode we place it on the free list unless it has been vgone'd 2395 * (marked VI_DOOMED) in which case we will free it. 2396 */ 2397void 2398vdropl(struct vnode *vp) 2399{ 2400 struct bufobj *bo; 2401 struct mount *mp; 2402 int active; 2403 2404 ASSERT_VI_LOCKED(vp, "vdropl"); 2405 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2406 if (vp->v_holdcnt <= 0) 2407 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2408 vp->v_holdcnt--; 2409 if (vp->v_holdcnt > 0) { 2410 VI_UNLOCK(vp); 2411 return; 2412 } 2413 if ((vp->v_iflag & VI_DOOMED) == 0) { 2414 /* 2415 * Mark a vnode as free: remove it from its active list 2416 * and put it up for recycling on the freelist. 2417 */ 2418 VNASSERT(vp->v_op != NULL, vp, 2419 ("vdropl: vnode already reclaimed.")); 2420 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2421 ("vnode already free")); 2422 VNASSERT(vp->v_holdcnt == 0, vp, 2423 ("vdropl: freeing when we shouldn't")); 2424 active = vp->v_iflag & VI_ACTIVE; 2425 vp->v_iflag &= ~VI_ACTIVE; 2426 mp = vp->v_mount; 2427 mtx_lock(&vnode_free_list_mtx); 2428 if (active) { 2429 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, 2430 v_actfreelist); 2431 mp->mnt_activevnodelistsize--; 2432 } 2433 if (vp->v_iflag & VI_AGE) { 2434 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_actfreelist); 2435 } else { 2436 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 2437 } 2438 freevnodes++; 2439 vp->v_iflag &= ~VI_AGE; 2440 vp->v_iflag |= VI_FREE; 2441 mtx_unlock(&vnode_free_list_mtx); 2442 VI_UNLOCK(vp); 2443 return; 2444 } 2445 /* 2446 * The vnode has been marked for destruction, so free it. 
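 * vgonel() has already set VI_DOOMED, reclaimed the filesystem state and
 * switched the vnode to dead_vnodeops, so all that is left here is to
 * verify that nothing still references it, tear down its locks, and
 * return the memory to vnode_zone.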
2447 */ 2448 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2449 atomic_subtract_long(&numvnodes, 1); 2450 bo = &vp->v_bufobj; 2451 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2452 ("cleaned vnode still on the free list.")); 2453 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2454 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2455 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2456 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2457 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2458 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2459 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2460 ("clean blk trie not empty")); 2461 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2462 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2463 ("dirty blk trie not empty")); 2464 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2465 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2466 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2467 VI_UNLOCK(vp); 2468#ifdef MAC 2469 mac_vnode_destroy(vp); 2470#endif 2471 if (vp->v_pollinfo != NULL) 2472 destroy_vpollinfo(vp->v_pollinfo); 2473#ifdef INVARIANTS 2474 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 2475 vp->v_op = NULL; 2476#endif 2477 rangelock_destroy(&vp->v_rl); 2478 lockdestroy(vp->v_vnlock); 2479 mtx_destroy(&vp->v_interlock); 2480 rw_destroy(BO_LOCKPTR(bo)); 2481 uma_zfree(vnode_zone, vp); 2482} 2483 2484/* 2485 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2486 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2487 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2488 * failed lock upgrade. 2489 */ 2490void 2491vinactive(struct vnode *vp, struct thread *td) 2492{ 2493 struct vm_object *obj; 2494 2495 ASSERT_VOP_ELOCKED(vp, "vinactive"); 2496 ASSERT_VI_LOCKED(vp, "vinactive"); 2497 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2498 ("vinactive: recursed on VI_DOINGINACT")); 2499 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2500 vp->v_iflag |= VI_DOINGINACT; 2501 vp->v_iflag &= ~VI_OWEINACT; 2502 VI_UNLOCK(vp); 2503 /* 2504 * Before moving off the active list, we must be sure that any 2505 * modified pages are on the vnode's dirty list since these will 2506 * no longer be checked once the vnode is on the inactive list. 2507 * Because the vnode vm object keeps a hold reference on the vnode 2508 * if there is at least one resident non-cached page, the vnode 2509 * cannot leave the active list without the page cleanup done. 2510 */ 2511 obj = vp->v_object; 2512 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 2513 VM_OBJECT_WLOCK(obj); 2514 vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC); 2515 VM_OBJECT_WUNLOCK(obj); 2516 } 2517 VOP_INACTIVE(vp, td); 2518 VI_LOCK(vp); 2519 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2520 ("vinactive: lost VI_DOINGINACT")); 2521 vp->v_iflag &= ~VI_DOINGINACT; 2522} 2523 2524/* 2525 * Remove any vnodes in the vnode table belonging to mount point mp. 2526 * 2527 * If FORCECLOSE is not specified, there should not be any active ones, 2528 * return error if any are found (nb: this is a user error, not a 2529 * system error). If FORCECLOSE is specified, detach any active vnodes 2530 * that are found. 2531 * 2532 * If WRITECLOSE is set, only flush out regular file vnodes open for 2533 * writing. 
2534 * 2535 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 2536 * 2537 * `rootrefs' specifies the base reference count for the root vnode 2538 * of this filesystem. The root vnode is considered busy if its 2539 * v_usecount exceeds this value. On a successful return, vflush(, td) 2540 * will call vrele() on the root vnode exactly rootrefs times. 2541 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2542 * be zero. 2543 */ 2544#ifdef DIAGNOSTIC 2545static int busyprt = 0; /* print out busy vnodes */ 2546SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 2547#endif 2548 2549int 2550vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 2551{ 2552 struct vnode *vp, *mvp, *rootvp = NULL; 2553 struct vattr vattr; 2554 int busy = 0, error; 2555 2556 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 2557 rootrefs, flags); 2558 if (rootrefs > 0) { 2559 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2560 ("vflush: bad args")); 2561 /* 2562 * Get the filesystem root vnode. We can vput() it 2563 * immediately, since with rootrefs > 0, it won't go away. 2564 */ 2565 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 2566 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 2567 __func__, error); 2568 return (error); 2569 } 2570 vput(rootvp); 2571 } 2572loop: 2573 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 2574 vholdl(vp); 2575 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 2576 if (error) { 2577 vdrop(vp); 2578 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2579 goto loop; 2580 } 2581 /* 2582 * Skip over a vnodes marked VV_SYSTEM. 2583 */ 2584 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2585 VOP_UNLOCK(vp, 0); 2586 vdrop(vp); 2587 continue; 2588 } 2589 /* 2590 * If WRITECLOSE is set, flush out unlinked but still open 2591 * files (even if open only for reading) and regular file 2592 * vnodes open for writing. 2593 */ 2594 if (flags & WRITECLOSE) { 2595 if (vp->v_object != NULL) { 2596 VM_OBJECT_WLOCK(vp->v_object); 2597 vm_object_page_clean(vp->v_object, 0, 0, 0); 2598 VM_OBJECT_WUNLOCK(vp->v_object); 2599 } 2600 error = VOP_FSYNC(vp, MNT_WAIT, td); 2601 if (error != 0) { 2602 VOP_UNLOCK(vp, 0); 2603 vdrop(vp); 2604 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2605 return (error); 2606 } 2607 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 2608 VI_LOCK(vp); 2609 2610 if ((vp->v_type == VNON || 2611 (error == 0 && vattr.va_nlink > 0)) && 2612 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2613 VOP_UNLOCK(vp, 0); 2614 vdropl(vp); 2615 continue; 2616 } 2617 } else 2618 VI_LOCK(vp); 2619 /* 2620 * With v_usecount == 0, all we need to do is clear out the 2621 * vnode data structures and we are done. 2622 * 2623 * If FORCECLOSE is set, forcibly close the vnode. 2624 */ 2625 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2626 VNASSERT(vp->v_usecount == 0 || 2627 vp->v_op != &devfs_specops || 2628 (vp->v_type != VCHR && vp->v_type != VBLK), vp, 2629 ("device VNODE %p is FORCECLOSED", vp)); 2630 vgonel(vp); 2631 } else { 2632 busy++; 2633#ifdef DIAGNOSTIC 2634 if (busyprt) 2635 vprint("vflush: busy vnode", vp); 2636#endif 2637 } 2638 VOP_UNLOCK(vp, 0); 2639 vdropl(vp); 2640 } 2641 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2642 /* 2643 * If just the root vnode is busy, and if its refcount 2644 * is equal to `rootrefs', then go ahead and kill it. 
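 * The vrele() calls in the loop at the bottom of this function then
 * drop the caller's remaining rootrefs references on the now-doomed
 * root vnode.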
2645 */ 2646 VI_LOCK(rootvp); 2647 KASSERT(busy > 0, ("vflush: not busy")); 2648 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2649 ("vflush: usecount %d < rootrefs %d", 2650 rootvp->v_usecount, rootrefs)); 2651 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2652 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 2653 vgone(rootvp); 2654 VOP_UNLOCK(rootvp, 0); 2655 busy = 0; 2656 } else 2657 VI_UNLOCK(rootvp); 2658 } 2659 if (busy) { 2660 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 2661 busy); 2662 return (EBUSY); 2663 } 2664 for (; rootrefs > 0; rootrefs--) 2665 vrele(rootvp); 2666 return (0); 2667} 2668 2669/* 2670 * Recycle an unused vnode to the front of the free list. 2671 */ 2672int 2673vrecycle(struct vnode *vp) 2674{ 2675 int recycled; 2676 2677 ASSERT_VOP_ELOCKED(vp, "vrecycle"); 2678 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2679 recycled = 0; 2680 VI_LOCK(vp); 2681 if (vp->v_usecount == 0) { 2682 recycled = 1; 2683 vgonel(vp); 2684 } 2685 VI_UNLOCK(vp); 2686 return (recycled); 2687} 2688 2689/* 2690 * Eliminate all activity associated with a vnode 2691 * in preparation for reuse. 2692 */ 2693void 2694vgone(struct vnode *vp) 2695{ 2696 VI_LOCK(vp); 2697 vgonel(vp); 2698 VI_UNLOCK(vp); 2699} 2700 2701static void 2702notify_lowervp_vfs_dummy(struct mount *mp __unused, 2703 struct vnode *lowervp __unused) 2704{ 2705} 2706 2707/* 2708 * Notify upper mounts about reclaimed or unlinked vnode. 2709 */ 2710void 2711vfs_notify_upper(struct vnode *vp, int event) 2712{ 2713 static struct vfsops vgonel_vfsops = { 2714 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 2715 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 2716 }; 2717 struct mount *mp, *ump, *mmp; 2718 2719 mp = vp->v_mount; 2720 if (mp == NULL) 2721 return; 2722 2723 MNT_ILOCK(mp); 2724 if (TAILQ_EMPTY(&mp->mnt_uppers)) 2725 goto unlock; 2726 MNT_IUNLOCK(mp); 2727 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 2728 mmp->mnt_op = &vgonel_vfsops; 2729 mmp->mnt_kern_flag |= MNTK_MARKER; 2730 MNT_ILOCK(mp); 2731 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 2732 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 2733 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 2734 ump = TAILQ_NEXT(ump, mnt_upper_link); 2735 continue; 2736 } 2737 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 2738 MNT_IUNLOCK(mp); 2739 switch (event) { 2740 case VFS_NOTIFY_UPPER_RECLAIM: 2741 VFS_RECLAIM_LOWERVP(ump, vp); 2742 break; 2743 case VFS_NOTIFY_UPPER_UNLINK: 2744 VFS_UNLINK_LOWERVP(ump, vp); 2745 break; 2746 default: 2747 KASSERT(0, ("invalid event %d", event)); 2748 break; 2749 } 2750 MNT_ILOCK(mp); 2751 ump = TAILQ_NEXT(mmp, mnt_upper_link); 2752 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 2753 } 2754 free(mmp, M_TEMP); 2755 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 2756 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 2757 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 2758 wakeup(&mp->mnt_uppers); 2759 } 2760unlock: 2761 MNT_IUNLOCK(mp); 2762} 2763 2764/* 2765 * vgone, with the vp interlock held. 2766 */ 2767void 2768vgonel(struct vnode *vp) 2769{ 2770 struct thread *td; 2771 int oweinact; 2772 int active; 2773 struct mount *mp; 2774 2775 ASSERT_VOP_ELOCKED(vp, "vgonel"); 2776 ASSERT_VI_LOCKED(vp, "vgonel"); 2777 VNASSERT(vp->v_holdcnt, vp, 2778 ("vgonel: vp %p has no reference.", vp)); 2779 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2780 td = curthread; 2781 2782 /* 2783 * Don't vgonel if we're already doomed. 
2784 */ 2785 if (vp->v_iflag & VI_DOOMED) 2786 return; 2787 vp->v_iflag |= VI_DOOMED; 2788 2789 /* 2790 * Check to see if the vnode is in use. If so, we have to call 2791 * VOP_CLOSE() and VOP_INACTIVE(). 2792 */ 2793 active = vp->v_usecount; 2794 oweinact = (vp->v_iflag & VI_OWEINACT); 2795 VI_UNLOCK(vp); 2796 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 2797 2798 /* 2799 * If purging an active vnode, it must be closed and 2800 * deactivated before being reclaimed. 2801 */ 2802 if (active) 2803 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2804 if (oweinact || active) { 2805 VI_LOCK(vp); 2806 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2807 vinactive(vp, td); 2808 VI_UNLOCK(vp); 2809 } 2810 if (vp->v_type == VSOCK) 2811 vfs_unp_reclaim(vp); 2812 2813 /* 2814 * Clean out any buffers associated with the vnode. 2815 * If the flush fails, just toss the buffers. 2816 */ 2817 mp = NULL; 2818 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2819 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2820 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 2821 while (vinvalbuf(vp, 0, 0, 0) != 0) 2822 ; 2823 } 2824#ifdef INVARIANTS 2825 BO_LOCK(&vp->v_bufobj); 2826 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 2827 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 2828 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 2829 vp->v_bufobj.bo_clean.bv_cnt == 0, 2830 ("vp %p bufobj not invalidated", vp)); 2831 vp->v_bufobj.bo_flag |= BO_DEAD; 2832 BO_UNLOCK(&vp->v_bufobj); 2833#endif 2834 2835 /* 2836 * Reclaim the vnode. 2837 */ 2838 if (VOP_RECLAIM(vp, td)) 2839 panic("vgone: cannot reclaim"); 2840 if (mp != NULL) 2841 vn_finished_secondary_write(mp); 2842 VNASSERT(vp->v_object == NULL, vp, 2843 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 2844 /* 2845 * Clear the advisory locks and wake up waiting threads. 2846 */ 2847 (void)VOP_ADVLOCKPURGE(vp); 2848 /* 2849 * Delete from old mount point vnode list. 2850 */ 2851 delmntque(vp); 2852 cache_purge(vp); 2853 /* 2854 * Done with purge, reset to the standard lock and invalidate 2855 * the vnode. 2856 */ 2857 VI_LOCK(vp); 2858 vp->v_vnlock = &vp->v_lock; 2859 vp->v_op = &dead_vnodeops; 2860 vp->v_tag = "none"; 2861 vp->v_type = VBAD; 2862} 2863 2864/* 2865 * Calculate the total number of references to a special device. 2866 */ 2867int 2868vcount(struct vnode *vp) 2869{ 2870 int count; 2871 2872 dev_lock(); 2873 count = vp->v_rdev->si_usecount; 2874 dev_unlock(); 2875 return (count); 2876} 2877 2878/* 2879 * Same as above, but using the struct cdev *as argument 2880 */ 2881int 2882count_dev(struct cdev *dev) 2883{ 2884 int count; 2885 2886 dev_lock(); 2887 count = dev->si_usecount; 2888 dev_unlock(); 2889 return(count); 2890} 2891 2892/* 2893 * Print out a description of a vnode. 2894 */ 2895static char *typename[] = 2896{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 2897 "VMARKER"}; 2898 2899void 2900vn_printf(struct vnode *vp, const char *fmt, ...) 
2901{ 2902 va_list ap; 2903 char buf[256], buf2[16]; 2904 u_long flags; 2905 2906 va_start(ap, fmt); 2907 vprintf(fmt, ap); 2908 va_end(ap); 2909 printf("%p: ", (void *)vp); 2910 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 2911 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", 2912 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); 2913 buf[0] = '\0'; 2914 buf[1] = '\0'; 2915 if (vp->v_vflag & VV_ROOT) 2916 strlcat(buf, "|VV_ROOT", sizeof(buf)); 2917 if (vp->v_vflag & VV_ISTTY) 2918 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 2919 if (vp->v_vflag & VV_NOSYNC) 2920 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 2921 if (vp->v_vflag & VV_ETERNALDEV) 2922 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 2923 if (vp->v_vflag & VV_CACHEDLABEL) 2924 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 2925 if (vp->v_vflag & VV_TEXT) 2926 strlcat(buf, "|VV_TEXT", sizeof(buf)); 2927 if (vp->v_vflag & VV_COPYONWRITE) 2928 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 2929 if (vp->v_vflag & VV_SYSTEM) 2930 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 2931 if (vp->v_vflag & VV_PROCDEP) 2932 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 2933 if (vp->v_vflag & VV_NOKNOTE) 2934 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 2935 if (vp->v_vflag & VV_DELETED) 2936 strlcat(buf, "|VV_DELETED", sizeof(buf)); 2937 if (vp->v_vflag & VV_MD) 2938 strlcat(buf, "|VV_MD", sizeof(buf)); 2939 if (vp->v_vflag & VV_FORCEINSMQ) 2940 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 2941 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 2942 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 2943 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 2944 if (flags != 0) { 2945 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 2946 strlcat(buf, buf2, sizeof(buf)); 2947 } 2948 if (vp->v_iflag & VI_MOUNT) 2949 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 2950 if (vp->v_iflag & VI_AGE) 2951 strlcat(buf, "|VI_AGE", sizeof(buf)); 2952 if (vp->v_iflag & VI_DOOMED) 2953 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 2954 if (vp->v_iflag & VI_FREE) 2955 strlcat(buf, "|VI_FREE", sizeof(buf)); 2956 if (vp->v_iflag & VI_ACTIVE) 2957 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 2958 if (vp->v_iflag & VI_DOINGINACT) 2959 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 2960 if (vp->v_iflag & VI_OWEINACT) 2961 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 2962 flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE | 2963 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 2964 if (flags != 0) { 2965 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 2966 strlcat(buf, buf2, sizeof(buf)); 2967 } 2968 printf(" flags (%s)\n", buf + 1); 2969 if (mtx_owned(VI_MTX(vp))) 2970 printf(" VI_LOCKed"); 2971 if (vp->v_object != NULL) 2972 printf(" v_object %p ref %d pages %d " 2973 "cleanbuf %d dirtybuf %d\n", 2974 vp->v_object, vp->v_object->ref_count, 2975 vp->v_object->resident_page_count, 2976 vp->v_bufobj.bo_dirty.bv_cnt, 2977 vp->v_bufobj.bo_clean.bv_cnt); 2978 printf(" "); 2979 lockmgr_printinfo(vp->v_vnlock); 2980 if (vp->v_data != NULL) 2981 VOP_PRINT(vp); 2982} 2983 2984#ifdef DDB 2985/* 2986 * List all of the locked vnodes in the system. 2987 * Called when debugging the kernel. 2988 */ 2989DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2990{ 2991 struct mount *mp; 2992 struct vnode *vp; 2993 2994 /* 2995 * Note: because this is DDB, we can't obey the locking semantics 2996 * for these structures, which means we could catch an inconsistent 2997 * state and dereference a nasty pointer. 
Not much to be done 2998 * about that. 2999 */ 3000 db_printf("Locked vnodes\n"); 3001 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3002 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3003 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3004 vprint("", vp); 3005 } 3006 } 3007} 3008 3009/* 3010 * Show details about the given vnode. 3011 */ 3012DB_SHOW_COMMAND(vnode, db_show_vnode) 3013{ 3014 struct vnode *vp; 3015 3016 if (!have_addr) 3017 return; 3018 vp = (struct vnode *)addr; 3019 vn_printf(vp, "vnode "); 3020} 3021 3022/* 3023 * Show details about the given mount point. 3024 */ 3025DB_SHOW_COMMAND(mount, db_show_mount) 3026{ 3027 struct mount *mp; 3028 struct vfsopt *opt; 3029 struct statfs *sp; 3030 struct vnode *vp; 3031 char buf[512]; 3032 uint64_t mflags; 3033 u_int flags; 3034 3035 if (!have_addr) { 3036 /* No address given, print short info about all mount points. */ 3037 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3038 db_printf("%p %s on %s (%s)\n", mp, 3039 mp->mnt_stat.f_mntfromname, 3040 mp->mnt_stat.f_mntonname, 3041 mp->mnt_stat.f_fstypename); 3042 if (db_pager_quit) 3043 break; 3044 } 3045 db_printf("\nMore info: show mount <addr>\n"); 3046 return; 3047 } 3048 3049 mp = (struct mount *)addr; 3050 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3051 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3052 3053 buf[0] = '\0'; 3054 mflags = mp->mnt_flag; 3055#define MNT_FLAG(flag) do { \ 3056 if (mflags & (flag)) { \ 3057 if (buf[0] != '\0') \ 3058 strlcat(buf, ", ", sizeof(buf)); \ 3059 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3060 mflags &= ~(flag); \ 3061 } \ 3062} while (0) 3063 MNT_FLAG(MNT_RDONLY); 3064 MNT_FLAG(MNT_SYNCHRONOUS); 3065 MNT_FLAG(MNT_NOEXEC); 3066 MNT_FLAG(MNT_NOSUID); 3067 MNT_FLAG(MNT_NFS4ACLS); 3068 MNT_FLAG(MNT_UNION); 3069 MNT_FLAG(MNT_ASYNC); 3070 MNT_FLAG(MNT_SUIDDIR); 3071 MNT_FLAG(MNT_SOFTDEP); 3072 MNT_FLAG(MNT_NOSYMFOLLOW); 3073 MNT_FLAG(MNT_GJOURNAL); 3074 MNT_FLAG(MNT_MULTILABEL); 3075 MNT_FLAG(MNT_ACLS); 3076 MNT_FLAG(MNT_NOATIME); 3077 MNT_FLAG(MNT_NOCLUSTERR); 3078 MNT_FLAG(MNT_NOCLUSTERW); 3079 MNT_FLAG(MNT_SUJ); 3080 MNT_FLAG(MNT_EXRDONLY); 3081 MNT_FLAG(MNT_EXPORTED); 3082 MNT_FLAG(MNT_DEFEXPORTED); 3083 MNT_FLAG(MNT_EXPORTANON); 3084 MNT_FLAG(MNT_EXKERB); 3085 MNT_FLAG(MNT_EXPUBLIC); 3086 MNT_FLAG(MNT_LOCAL); 3087 MNT_FLAG(MNT_QUOTA); 3088 MNT_FLAG(MNT_ROOTFS); 3089 MNT_FLAG(MNT_USER); 3090 MNT_FLAG(MNT_IGNORE); 3091 MNT_FLAG(MNT_UPDATE); 3092 MNT_FLAG(MNT_DELEXPORT); 3093 MNT_FLAG(MNT_RELOAD); 3094 MNT_FLAG(MNT_FORCE); 3095 MNT_FLAG(MNT_SNAPSHOT); 3096 MNT_FLAG(MNT_BYFSID); 3097#undef MNT_FLAG 3098 if (mflags != 0) { 3099 if (buf[0] != '\0') 3100 strlcat(buf, ", ", sizeof(buf)); 3101 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3102 "0x%016jx", mflags); 3103 } 3104 db_printf(" mnt_flag = %s\n", buf); 3105 3106 buf[0] = '\0'; 3107 flags = mp->mnt_kern_flag; 3108#define MNT_KERN_FLAG(flag) do { \ 3109 if (flags & (flag)) { \ 3110 if (buf[0] != '\0') \ 3111 strlcat(buf, ", ", sizeof(buf)); \ 3112 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3113 flags &= ~(flag); \ 3114 } \ 3115} while (0) 3116 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3117 MNT_KERN_FLAG(MNTK_ASYNC); 3118 MNT_KERN_FLAG(MNTK_SOFTDEP); 3119 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 3120 MNT_KERN_FLAG(MNTK_DRAINING); 3121 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3122 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3123 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3124 MNT_KERN_FLAG(MNTK_NO_IOPF); 3125 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3126 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3127 
MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3128 MNT_KERN_FLAG(MNTK_MARKER); 3129 MNT_KERN_FLAG(MNTK_NOASYNC); 3130 MNT_KERN_FLAG(MNTK_UNMOUNT); 3131 MNT_KERN_FLAG(MNTK_MWAIT); 3132 MNT_KERN_FLAG(MNTK_SUSPEND); 3133 MNT_KERN_FLAG(MNTK_SUSPEND2); 3134 MNT_KERN_FLAG(MNTK_SUSPENDED); 3135 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3136 MNT_KERN_FLAG(MNTK_NOKNOTE); 3137#undef MNT_KERN_FLAG 3138 if (flags != 0) { 3139 if (buf[0] != '\0') 3140 strlcat(buf, ", ", sizeof(buf)); 3141 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3142 "0x%08x", flags); 3143 } 3144 db_printf(" mnt_kern_flag = %s\n", buf); 3145 3146 db_printf(" mnt_opt = "); 3147 opt = TAILQ_FIRST(mp->mnt_opt); 3148 if (opt != NULL) { 3149 db_printf("%s", opt->name); 3150 opt = TAILQ_NEXT(opt, link); 3151 while (opt != NULL) { 3152 db_printf(", %s", opt->name); 3153 opt = TAILQ_NEXT(opt, link); 3154 } 3155 } 3156 db_printf("\n"); 3157 3158 sp = &mp->mnt_stat; 3159 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3160 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3161 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3162 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3163 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3164 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3165 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3166 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3167 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3168 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3169 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3170 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3171 3172 db_printf(" mnt_cred = { uid=%u ruid=%u", 3173 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3174 if (jailed(mp->mnt_cred)) 3175 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3176 db_printf(" }\n"); 3177 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3178 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3179 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3180 db_printf(" mnt_activevnodelistsize = %d\n", 3181 mp->mnt_activevnodelistsize); 3182 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3183 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3184 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3185 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3186 db_printf(" mnt_lockref = %d\n", mp->mnt_lockref); 3187 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3188 db_printf(" mnt_secondary_accwrites = %d\n", 3189 mp->mnt_secondary_accwrites); 3190 db_printf(" mnt_gjprovider = %s\n", 3191 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 3192 3193 db_printf("\n\nList of active vnodes\n"); 3194 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 3195 if (vp->v_type != VMARKER) { 3196 vn_printf(vp, "vnode "); 3197 if (db_pager_quit) 3198 break; 3199 } 3200 } 3201 db_printf("\n\nList of inactive vnodes\n"); 3202 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3203 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 3204 vn_printf(vp, "vnode "); 3205 if (db_pager_quit) 3206 break; 3207 } 3208 } 3209} 3210#endif /* DDB */ 3211 3212/* 3213 * Fill in a struct xvfsconf based on a struct vfsconf. 
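 *
 * Userland reads an array of these records through the vfs.conflist
 * sysctl defined below; a minimal consumer sketch (error handling
 * omitted, not part of this file) is:
 *
 *	size_t len;
 *	struct xvfsconf *xvfsp;
 *	unsigned i;
 *
 *	sysctlbyname("vfs.conflist", NULL, &len, NULL, 0);
 *	xvfsp = malloc(len);
 *	sysctlbyname("vfs.conflist", xvfsp, &len, NULL, 0);
 *	for (i = 0; i < len / sizeof(*xvfsp); i++)
 *		printf("%s %d\n", xvfsp[i].vfc_name, xvfsp[i].vfc_refcount);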
3214 */ 3215static int 3216vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 3217{ 3218 struct xvfsconf xvfsp; 3219 3220 bzero(&xvfsp, sizeof(xvfsp)); 3221 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3222 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3223 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3224 xvfsp.vfc_flags = vfsp->vfc_flags; 3225 /* 3226 * These are unused in userland, we keep them 3227 * to not break binary compatibility. 3228 */ 3229 xvfsp.vfc_vfsops = NULL; 3230 xvfsp.vfc_next = NULL; 3231 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3232} 3233 3234#ifdef COMPAT_FREEBSD32 3235struct xvfsconf32 { 3236 uint32_t vfc_vfsops; 3237 char vfc_name[MFSNAMELEN]; 3238 int32_t vfc_typenum; 3239 int32_t vfc_refcount; 3240 int32_t vfc_flags; 3241 uint32_t vfc_next; 3242}; 3243 3244static int 3245vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3246{ 3247 struct xvfsconf32 xvfsp; 3248 3249 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3250 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3251 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3252 xvfsp.vfc_flags = vfsp->vfc_flags; 3253 xvfsp.vfc_vfsops = 0; 3254 xvfsp.vfc_next = 0; 3255 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3256} 3257#endif 3258 3259/* 3260 * Top level filesystem related information gathering. 3261 */ 3262static int 3263sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3264{ 3265 struct vfsconf *vfsp; 3266 int error; 3267 3268 error = 0; 3269 vfsconf_slock(); 3270 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3271#ifdef COMPAT_FREEBSD32 3272 if (req->flags & SCTL_MASK32) 3273 error = vfsconf2x32(req, vfsp); 3274 else 3275#endif 3276 error = vfsconf2x(req, vfsp); 3277 if (error) 3278 break; 3279 } 3280 vfsconf_sunlock(); 3281 return (error); 3282} 3283 3284SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3285 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3286 "S,xvfsconf", "List of all configured filesystems"); 3287 3288#ifndef BURN_BRIDGES 3289static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3290 3291static int 3292vfs_sysctl(SYSCTL_HANDLER_ARGS) 3293{ 3294 int *name = (int *)arg1 - 1; /* XXX */ 3295 u_int namelen = arg2 + 1; /* XXX */ 3296 struct vfsconf *vfsp; 3297 3298 log(LOG_WARNING, "userland calling deprecated sysctl, " 3299 "please rebuild world\n"); 3300 3301#if 1 || defined(COMPAT_PRELITE2) 3302 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 3303 if (namelen == 1) 3304 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3305#endif 3306 3307 switch (name[1]) { 3308 case VFS_MAXTYPENUM: 3309 if (namelen != 2) 3310 return (ENOTDIR); 3311 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3312 case VFS_CONF: 3313 if (namelen != 3) 3314 return (ENOTDIR); /* overloaded */ 3315 vfsconf_slock(); 3316 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3317 if (vfsp->vfc_typenum == name[2]) 3318 break; 3319 } 3320 vfsconf_sunlock(); 3321 if (vfsp == NULL) 3322 return (EOPNOTSUPP); 3323#ifdef COMPAT_FREEBSD32 3324 if (req->flags & SCTL_MASK32) 3325 return (vfsconf2x32(req, vfsp)); 3326 else 3327#endif 3328 return (vfsconf2x(req, vfsp)); 3329 } 3330 return (EOPNOTSUPP); 3331} 3332 3333static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3334 CTLFLAG_MPSAFE, vfs_sysctl, 3335 "Generic filesystem"); 3336 3337#if 1 || defined(COMPAT_PRELITE2) 3338 3339static int 3340sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3341{ 3342 int error; 3343 struct vfsconf *vfsp; 3344 struct ovfsconf ovfs; 3345 3346 vfsconf_slock(); 3347 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3348 bzero(&ovfs, sizeof(ovfs)); 3349 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3350 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3351 ovfs.vfc_index = vfsp->vfc_typenum; 3352 ovfs.vfc_refcount = vfsp->vfc_refcount; 3353 ovfs.vfc_flags = vfsp->vfc_flags; 3354 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3355 if (error != 0) { 3356 vfsconf_sunlock(); 3357 return (error); 3358 } 3359 } 3360 vfsconf_sunlock(); 3361 return (0); 3362} 3363 3364#endif /* 1 || COMPAT_PRELITE2 */ 3365#endif /* !BURN_BRIDGES */ 3366 3367#define KINFO_VNODESLOP 10 3368#ifdef notyet 3369/* 3370 * Dump vnode list (via sysctl). 3371 */ 3372/* ARGSUSED */ 3373static int 3374sysctl_vnode(SYSCTL_HANDLER_ARGS) 3375{ 3376 struct xvnode *xvn; 3377 struct mount *mp; 3378 struct vnode *vp; 3379 int error, len, n; 3380 3381 /* 3382 * Stale numvnodes access is not fatal here. 3383 */ 3384 req->lock = 0; 3385 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3386 if (!req->oldptr) 3387 /* Make an estimate */ 3388 return (SYSCTL_OUT(req, 0, len)); 3389 3390 error = sysctl_wire_old_buffer(req, 0); 3391 if (error != 0) 3392 return (error); 3393 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3394 n = 0; 3395 mtx_lock(&mountlist_mtx); 3396 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3397 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3398 continue; 3399 MNT_ILOCK(mp); 3400 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3401 if (n == len) 3402 break; 3403 vref(vp); 3404 xvn[n].xv_size = sizeof *xvn; 3405 xvn[n].xv_vnode = vp; 3406 xvn[n].xv_id = 0; /* XXX compat */ 3407#define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3408 XV_COPY(usecount); 3409 XV_COPY(writecount); 3410 XV_COPY(holdcnt); 3411 XV_COPY(mount); 3412 XV_COPY(numoutput); 3413 XV_COPY(type); 3414#undef XV_COPY 3415 xvn[n].xv_flag = vp->v_vflag; 3416 3417 switch (vp->v_type) { 3418 case VREG: 3419 case VDIR: 3420 case VLNK: 3421 break; 3422 case VBLK: 3423 case VCHR: 3424 if (vp->v_rdev == NULL) { 3425 vrele(vp); 3426 continue; 3427 } 3428 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3429 break; 3430 case VSOCK: 3431 xvn[n].xv_socket = vp->v_socket; 3432 break; 3433 case VFIFO: 3434 xvn[n].xv_fifo = vp->v_fifoinfo; 3435 break; 3436 case VNON: 3437 case VBAD: 3438 default: 3439 /* shouldn't happen? 
*/ 3440 vrele(vp); 3441 continue; 3442 } 3443 vrele(vp); 3444 ++n; 3445 } 3446 MNT_IUNLOCK(mp); 3447 mtx_lock(&mountlist_mtx); 3448 vfs_unbusy(mp); 3449 if (n == len) 3450 break; 3451 } 3452 mtx_unlock(&mountlist_mtx); 3453 3454 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3455 free(xvn, M_TEMP); 3456 return (error); 3457} 3458 3459SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 3460 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 3461 ""); 3462#endif 3463 3464/* 3465 * Unmount all filesystems. The list is traversed in reverse order 3466 * of mounting to avoid dependencies. 3467 */ 3468void 3469vfs_unmountall(void) 3470{ 3471 struct mount *mp; 3472 struct thread *td; 3473 int error; 3474 3475 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 3476 td = curthread; 3477 3478 /* 3479 * Since this only runs when rebooting, it is not interlocked. 3480 */ 3481 while(!TAILQ_EMPTY(&mountlist)) { 3482 mp = TAILQ_LAST(&mountlist, mntlist); 3483 error = dounmount(mp, MNT_FORCE, td); 3484 if (error) { 3485 TAILQ_REMOVE(&mountlist, mp, mnt_list); 3486 /* 3487 * XXX: Due to the way in which we mount the root 3488 * file system off of devfs, devfs will generate a 3489 * "busy" warning when we try to unmount it before 3490 * the root. Don't print a warning as a result in 3491 * order to avoid false positive errors that may 3492 * cause needless upset. 3493 */ 3494 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 3495 printf("unmount of %s failed (", 3496 mp->mnt_stat.f_mntonname); 3497 if (error == EBUSY) 3498 printf("BUSY)\n"); 3499 else 3500 printf("%d)\n", error); 3501 } 3502 } else { 3503 /* The unmount has removed mp from the mountlist */ 3504 } 3505 } 3506} 3507 3508/* 3509 * perform msync on all vnodes under a mount point 3510 * the mount point must be locked. 3511 */ 3512void 3513vfs_msync(struct mount *mp, int flags) 3514{ 3515 struct vnode *vp, *mvp; 3516 struct vm_object *obj; 3517 3518 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 3519 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 3520 obj = vp->v_object; 3521 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 3522 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 3523 if (!vget(vp, 3524 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 3525 curthread)) { 3526 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 3527 vput(vp); 3528 continue; 3529 } 3530 3531 obj = vp->v_object; 3532 if (obj != NULL) { 3533 VM_OBJECT_WLOCK(obj); 3534 vm_object_page_clean(obj, 0, 0, 3535 flags == MNT_WAIT ? 3536 OBJPC_SYNC : OBJPC_NOSYNC); 3537 VM_OBJECT_WUNLOCK(obj); 3538 } 3539 vput(vp); 3540 } 3541 } else 3542 VI_UNLOCK(vp); 3543 } 3544} 3545 3546static void 3547destroy_vpollinfo_free(struct vpollinfo *vi) 3548{ 3549 3550 knlist_destroy(&vi->vpi_selinfo.si_note); 3551 mtx_destroy(&vi->vpi_lock); 3552 uma_zfree(vnodepoll_zone, vi); 3553} 3554 3555static void 3556destroy_vpollinfo(struct vpollinfo *vi) 3557{ 3558 3559 knlist_clear(&vi->vpi_selinfo.si_note, 1); 3560 seldrain(&vi->vpi_selinfo); 3561 destroy_vpollinfo_free(vi); 3562} 3563 3564/* 3565 * Initalize per-vnode helper structure to hold poll-related state. 
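 *
 * The vpollinfo is allocated up front because the M_WAITOK allocation
 * may sleep, which cannot be done while the vnode interlock is held;
 * any race with another thread doing the same thing is resolved under
 * VI_LOCK, and the loser simply frees its unused copy.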
3566 */ 3567 void 3568 v_addpollinfo(struct vnode *vp) 3569 { 3570 struct vpollinfo *vi; 3571 3572 if (vp->v_pollinfo != NULL) 3573 return; 3574 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 3575 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 3576 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 3577 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 3578 VI_LOCK(vp); 3579 if (vp->v_pollinfo != NULL) { 3580 VI_UNLOCK(vp); 3581 destroy_vpollinfo_free(vi); 3582 return; 3583 } 3584 vp->v_pollinfo = vi; 3585 VI_UNLOCK(vp); 3586 } 3587 3588 /* 3589 * Record a process's interest in events which might happen to 3590 * a vnode. Because poll uses the historic select-style interface 3591 * internally, this routine serves as both the ``check for any 3592 * pending events'' and the ``record my interest in future events'' 3593 * functions. (These are done together, while the lock is held, 3594 * to avoid race conditions.) 3595 */ 3596 int 3597 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3598 { 3599 3600 v_addpollinfo(vp); 3601 mtx_lock(&vp->v_pollinfo->vpi_lock); 3602 if (vp->v_pollinfo->vpi_revents & events) { 3603 /* 3604 * This leaves events we are not interested 3605 * in available for the other process which 3606 * presumably had requested them 3607 * (otherwise they would never have been 3608 * recorded). 3609 */ 3610 events &= vp->v_pollinfo->vpi_revents; 3611 vp->v_pollinfo->vpi_revents &= ~events; 3612 3613 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3614 return (events); 3615 } 3616 vp->v_pollinfo->vpi_events |= events; 3617 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 3618 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3619 return (0); 3620 } 3621 3622 /* 3623 * Routine to create and manage a filesystem syncer vnode. 3624 */ 3625 #define sync_close ((int (*)(struct vop_close_args *))nullop) 3626 static int sync_fsync(struct vop_fsync_args *); 3627 static int sync_inactive(struct vop_inactive_args *); 3628 static int sync_reclaim(struct vop_reclaim_args *); 3629 3630 static struct vop_vector sync_vnodeops = { 3631 .vop_bypass = VOP_EOPNOTSUPP, 3632 .vop_close = sync_close, /* close */ 3633 .vop_fsync = sync_fsync, /* fsync */ 3634 .vop_inactive = sync_inactive, /* inactive */ 3635 .vop_reclaim = sync_reclaim, /* reclaim */ 3636 .vop_lock1 = vop_stdlock, /* lock */ 3637 .vop_unlock = vop_stdunlock, /* unlock */ 3638 .vop_islocked = vop_stdislocked, /* islocked */ 3639 }; 3640 3641 /* 3642 * Create a new filesystem syncer vnode for the specified mount point. 3643 */ 3644 void 3645 vfs_allocate_syncvnode(struct mount *mp) 3646 { 3647 struct vnode *vp; 3648 struct bufobj *bo; 3649 static long start, incr, next; 3650 int error; 3651 3652 /* Allocate a new vnode */ 3653 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 3654 if (error != 0) 3655 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 3656 vp->v_type = VNON; 3657 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3658 vp->v_vflag |= VV_FORCEINSMQ; 3659 error = insmntque(vp, mp); 3660 if (error != 0) 3661 panic("vfs_allocate_syncvnode: insmntque() failed"); 3662 vp->v_vflag &= ~VV_FORCEINSMQ; 3663 VOP_UNLOCK(vp, 0); 3664 /* 3665 * Place the vnode onto the syncer worklist. We attempt to 3666 * scatter them about on the list so that they will go off 3667 * at evenly distributed times even if all the filesystems 3668 * are mounted at once.
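 *
 * The static start/incr/next triple below spreads the syncer vnodes by
 * successive bisection of the delay range.  For illustration, with a
 * hypothetical syncer_maxdelay of 32 the successive values of "next"
 * are 16, 8, 24, 4, 12, 20, 28, 2, 6, ..., and each vnode is queued
 * (next % syncdelay) seconds ahead of the current hand.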
3669 */ 3670 next += incr; 3671 if (next == 0 || next > syncer_maxdelay) { 3672 start /= 2; 3673 incr /= 2; 3674 if (start == 0) { 3675 start = syncer_maxdelay / 2; 3676 incr = syncer_maxdelay; 3677 } 3678 next = start; 3679 } 3680 bo = &vp->v_bufobj; 3681 BO_LOCK(bo); 3682 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 3683 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 3684 mtx_lock(&sync_mtx); 3685 sync_vnode_count++; 3686 if (mp->mnt_syncer == NULL) { 3687 mp->mnt_syncer = vp; 3688 vp = NULL; 3689 } 3690 mtx_unlock(&sync_mtx); 3691 BO_UNLOCK(bo); 3692 if (vp != NULL) { 3693 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3694 vgone(vp); 3695 vput(vp); 3696 } 3697} 3698 3699void 3700vfs_deallocate_syncvnode(struct mount *mp) 3701{ 3702 struct vnode *vp; 3703 3704 mtx_lock(&sync_mtx); 3705 vp = mp->mnt_syncer; 3706 if (vp != NULL) 3707 mp->mnt_syncer = NULL; 3708 mtx_unlock(&sync_mtx); 3709 if (vp != NULL) 3710 vrele(vp); 3711} 3712 3713/* 3714 * Do a lazy sync of the filesystem. 3715 */ 3716static int 3717sync_fsync(struct vop_fsync_args *ap) 3718{ 3719 struct vnode *syncvp = ap->a_vp; 3720 struct mount *mp = syncvp->v_mount; 3721 int error, save; 3722 struct bufobj *bo; 3723 3724 /* 3725 * We only need to do something if this is a lazy evaluation. 3726 */ 3727 if (ap->a_waitfor != MNT_LAZY) 3728 return (0); 3729 3730 /* 3731 * Move ourselves to the back of the sync list. 3732 */ 3733 bo = &syncvp->v_bufobj; 3734 BO_LOCK(bo); 3735 vn_syncer_add_to_worklist(bo, syncdelay); 3736 BO_UNLOCK(bo); 3737 3738 /* 3739 * Walk the list of vnodes pushing all that are dirty and 3740 * not already on the sync list. 3741 */ 3742 if (vfs_busy(mp, MBF_NOWAIT) != 0) 3743 return (0); 3744 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3745 vfs_unbusy(mp); 3746 return (0); 3747 } 3748 save = curthread_pflags_set(TDP_SYNCIO); 3749 vfs_msync(mp, MNT_NOWAIT); 3750 error = VFS_SYNC(mp, MNT_LAZY); 3751 curthread_pflags_restore(save); 3752 vn_finished_write(mp); 3753 vfs_unbusy(mp); 3754 return (error); 3755} 3756 3757/* 3758 * The syncer vnode is no referenced. 3759 */ 3760static int 3761sync_inactive(struct vop_inactive_args *ap) 3762{ 3763 3764 vgone(ap->a_vp); 3765 return (0); 3766} 3767 3768/* 3769 * The syncer vnode is no longer needed and is being decommissioned. 3770 * 3771 * Modifications to the worklist must be protected by sync_mtx. 3772 */ 3773static int 3774sync_reclaim(struct vop_reclaim_args *ap) 3775{ 3776 struct vnode *vp = ap->a_vp; 3777 struct bufobj *bo; 3778 3779 bo = &vp->v_bufobj; 3780 BO_LOCK(bo); 3781 mtx_lock(&sync_mtx); 3782 if (vp->v_mount->mnt_syncer == vp) 3783 vp->v_mount->mnt_syncer = NULL; 3784 if (bo->bo_flag & BO_ONWORKLST) { 3785 LIST_REMOVE(bo, bo_synclist); 3786 syncer_worklist_len--; 3787 sync_vnode_count--; 3788 bo->bo_flag &= ~BO_ONWORKLST; 3789 } 3790 mtx_unlock(&sync_mtx); 3791 BO_UNLOCK(bo); 3792 3793 return (0); 3794} 3795 3796/* 3797 * Check if vnode represents a disk device 3798 */ 3799int 3800vn_isdisk(struct vnode *vp, int *errp) 3801{ 3802 int error; 3803 3804 error = 0; 3805 dev_lock(); 3806 if (vp->v_type != VCHR) 3807 error = ENOTBLK; 3808 else if (vp->v_rdev == NULL) 3809 error = ENXIO; 3810 else if (vp->v_rdev->si_devsw == NULL) 3811 error = ENXIO; 3812 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3813 error = ENOTBLK; 3814 dev_unlock(); 3815 if (errp != NULL) 3816 *errp = error; 3817 return (error == 0); 3818} 3819 3820/* 3821 * Common filesystem object access control check routine. 
Accepts a 3822 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3823 * and optional call-by-reference privused argument allowing vaccess() 3824 * to indicate to the caller whether privilege was used to satisfy the 3825 * request (obsoleted). Returns 0 on success, or an errno on failure. 3826 */ 3827int 3828vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3829 accmode_t accmode, struct ucred *cred, int *privused) 3830{ 3831 accmode_t dac_granted; 3832 accmode_t priv_granted; 3833 3834 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 3835 ("invalid bit in accmode")); 3836 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 3837 ("VAPPEND without VWRITE")); 3838 3839 /* 3840 * Look for a normal, non-privileged way to access the file/directory 3841 * as requested. If it exists, go with that. 3842 */ 3843 3844 if (privused != NULL) 3845 *privused = 0; 3846 3847 dac_granted = 0; 3848 3849 /* Check the owner. */ 3850 if (cred->cr_uid == file_uid) { 3851 dac_granted |= VADMIN; 3852 if (file_mode & S_IXUSR) 3853 dac_granted |= VEXEC; 3854 if (file_mode & S_IRUSR) 3855 dac_granted |= VREAD; 3856 if (file_mode & S_IWUSR) 3857 dac_granted |= (VWRITE | VAPPEND); 3858 3859 if ((accmode & dac_granted) == accmode) 3860 return (0); 3861 3862 goto privcheck; 3863 } 3864 3865 /* Otherwise, check the groups (first match) */ 3866 if (groupmember(file_gid, cred)) { 3867 if (file_mode & S_IXGRP) 3868 dac_granted |= VEXEC; 3869 if (file_mode & S_IRGRP) 3870 dac_granted |= VREAD; 3871 if (file_mode & S_IWGRP) 3872 dac_granted |= (VWRITE | VAPPEND); 3873 3874 if ((accmode & dac_granted) == accmode) 3875 return (0); 3876 3877 goto privcheck; 3878 } 3879 3880 /* Otherwise, check everyone else. */ 3881 if (file_mode & S_IXOTH) 3882 dac_granted |= VEXEC; 3883 if (file_mode & S_IROTH) 3884 dac_granted |= VREAD; 3885 if (file_mode & S_IWOTH) 3886 dac_granted |= (VWRITE | VAPPEND); 3887 if ((accmode & dac_granted) == accmode) 3888 return (0); 3889 3890privcheck: 3891 /* 3892 * Build a privilege mask to determine if the set of privileges 3893 * satisfies the requirements when combined with the granted mask 3894 * from above. For each privilege, if the privilege is required, 3895 * bitwise or the request type onto the priv_granted mask. 3896 */ 3897 priv_granted = 0; 3898 3899 if (type == VDIR) { 3900 /* 3901 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 3902 * requests, instead of PRIV_VFS_EXEC. 3903 */ 3904 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3905 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 3906 priv_granted |= VEXEC; 3907 } else { 3908 /* 3909 * Ensure that at least one execute bit is on. Otherwise, 3910 * a privileged user will always succeed, and we don't want 3911 * this to happen unless the file really is executable. 
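 * For example, even the superuser cannot execute a mode 0644 file:
 * with no execute bit set anywhere, PRIV_VFS_EXEC is never consulted,
 * VEXEC never enters priv_granted, and the final check fails with
 * EACCES.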
3912 */ 3913 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3914 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 3915 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 3916 priv_granted |= VEXEC; 3917 } 3918 3919 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 3920 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 3921 priv_granted |= VREAD; 3922 3923 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3924 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 3925 priv_granted |= (VWRITE | VAPPEND); 3926 3927 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3928 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 3929 priv_granted |= VADMIN; 3930 3931 if ((accmode & (priv_granted | dac_granted)) == accmode) { 3932 /* XXX audit: privilege used */ 3933 if (privused != NULL) 3934 *privused = 1; 3935 return (0); 3936 } 3937 3938 return ((accmode & VADMIN) ? EPERM : EACCES); 3939} 3940 3941/* 3942 * Credential check based on process requesting service, and per-attribute 3943 * permissions. 3944 */ 3945int 3946extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 3947 struct thread *td, accmode_t accmode) 3948{ 3949 3950 /* 3951 * Kernel-invoked always succeeds. 3952 */ 3953 if (cred == NOCRED) 3954 return (0); 3955 3956 /* 3957 * Do not allow privileged processes in jail to directly manipulate 3958 * system attributes. 3959 */ 3960 switch (attrnamespace) { 3961 case EXTATTR_NAMESPACE_SYSTEM: 3962 /* Potentially should be: return (EPERM); */ 3963 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 3964 case EXTATTR_NAMESPACE_USER: 3965 return (VOP_ACCESS(vp, accmode, cred, td)); 3966 default: 3967 return (EPERM); 3968 } 3969} 3970 3971#ifdef DEBUG_VFS_LOCKS 3972/* 3973 * This only exists to supress warnings from unlocked specfs accesses. It is 3974 * no longer ok to have an unlocked VFS. 3975 */ 3976#define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 3977 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 3978 3979int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 3980SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 3981 "Drop into debugger on lock violation"); 3982 3983int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 3984SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 3985 0, "Check for interlock across VOPs"); 3986 3987int vfs_badlock_print = 1; /* Print lock violations. */ 3988SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 3989 0, "Print lock violations"); 3990 3991#ifdef KDB 3992int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
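 * (Run-time note added for illustration, not taken from this file: the
 * vfs_badlock_* variables here are exported read-write, so the usual
 * sysctl(8) idiom, for example "sysctl debug.vfs_badlock_ddb=0", can
 * demote a violation from a debugger entry to a console message while
 * debug.vfs_badlock_print stays enabled.)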
*/ 3993SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 3994 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 3995#endif 3996 3997static void 3998vfs_badlock(const char *msg, const char *str, struct vnode *vp) 3999{ 4000 4001#ifdef KDB 4002 if (vfs_badlock_backtrace) 4003 kdb_backtrace(); 4004#endif 4005 if (vfs_badlock_print) 4006 printf("%s: %p %s\n", str, (void *)vp, msg); 4007 if (vfs_badlock_ddb) 4008 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4009} 4010 4011void 4012assert_vi_locked(struct vnode *vp, const char *str) 4013{ 4014 4015 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4016 vfs_badlock("interlock is not locked but should be", str, vp); 4017} 4018 4019void 4020assert_vi_unlocked(struct vnode *vp, const char *str) 4021{ 4022 4023 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4024 vfs_badlock("interlock is locked but should not be", str, vp); 4025} 4026 4027void 4028assert_vop_locked(struct vnode *vp, const char *str) 4029{ 4030 int locked; 4031 4032 if (!IGNORE_LOCK(vp)) { 4033 locked = VOP_ISLOCKED(vp); 4034 if (locked == 0 || locked == LK_EXCLOTHER) 4035 vfs_badlock("is not locked but should be", str, vp); 4036 } 4037} 4038 4039void 4040assert_vop_unlocked(struct vnode *vp, const char *str) 4041{ 4042 4043 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4044 vfs_badlock("is locked but should not be", str, vp); 4045} 4046 4047void 4048assert_vop_elocked(struct vnode *vp, const char *str) 4049{ 4050 4051 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4052 vfs_badlock("is not exclusive locked but should be", str, vp); 4053} 4054 4055#if 0 4056void 4057assert_vop_elocked_other(struct vnode *vp, const char *str) 4058{ 4059 4060 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER) 4061 vfs_badlock("is not exclusive locked by another thread", 4062 str, vp); 4063} 4064 4065void 4066assert_vop_slocked(struct vnode *vp, const char *str) 4067{ 4068 4069 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED) 4070 vfs_badlock("is not locked shared but should be", str, vp); 4071} 4072#endif /* 0 */ 4073#endif /* DEBUG_VFS_LOCKS */ 4074 4075void 4076vop_rename_fail(struct vop_rename_args *ap) 4077{ 4078 4079 if (ap->a_tvp != NULL) 4080 vput(ap->a_tvp); 4081 if (ap->a_tdvp == ap->a_tvp) 4082 vrele(ap->a_tdvp); 4083 else 4084 vput(ap->a_tdvp); 4085 vrele(ap->a_fdvp); 4086 vrele(ap->a_fvp); 4087} 4088 4089void 4090vop_rename_pre(void *ap) 4091{ 4092 struct vop_rename_args *a = ap; 4093 4094#ifdef DEBUG_VFS_LOCKS 4095 if (a->a_tvp) 4096 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4097 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4098 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4099 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4100 4101 /* Check the source (from). */ 4102 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4103 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4104 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4105 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4106 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4107 4108 /* Check the target. 
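 * (Summary added for clarity, derived from the assertions in this
 * function rather than from the original comment: fdvp and fvp are
 * expected to be unlocked at this point unless they share a lock with
 * one of the target vnodes, per the tests above, while tdvp and, when
 * it exists, tvp must already be locked by the caller.)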
*/ 4109 if (a->a_tvp) 4110 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4111 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4112#endif 4113 if (a->a_tdvp != a->a_fdvp) 4114 vhold(a->a_fdvp); 4115 if (a->a_tvp != a->a_fvp) 4116 vhold(a->a_fvp); 4117 vhold(a->a_tdvp); 4118 if (a->a_tvp) 4119 vhold(a->a_tvp); 4120} 4121 4122void 4123vop_strategy_pre(void *ap) 4124{ 4125#ifdef DEBUG_VFS_LOCKS 4126 struct vop_strategy_args *a; 4127 struct buf *bp; 4128 4129 a = ap; 4130 bp = a->a_bp; 4131 4132 /* 4133 * Cluster ops lock their component buffers but not the IO container. 4134 */ 4135 if ((bp->b_flags & B_CLUSTER) != 0) 4136 return; 4137 4138 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4139 if (vfs_badlock_print) 4140 printf( 4141 "VOP_STRATEGY: bp is not locked but should be\n"); 4142 if (vfs_badlock_ddb) 4143 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4144 } 4145#endif 4146} 4147 4148void 4149vop_lock_pre(void *ap) 4150{ 4151#ifdef DEBUG_VFS_LOCKS 4152 struct vop_lock1_args *a = ap; 4153 4154 if ((a->a_flags & LK_INTERLOCK) == 0) 4155 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4156 else 4157 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4158#endif 4159} 4160 4161void 4162vop_lock_post(void *ap, int rc) 4163{ 4164#ifdef DEBUG_VFS_LOCKS 4165 struct vop_lock1_args *a = ap; 4166 4167 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4168 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4169 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4170#endif 4171} 4172 4173void 4174vop_unlock_pre(void *ap) 4175{ 4176#ifdef DEBUG_VFS_LOCKS 4177 struct vop_unlock_args *a = ap; 4178 4179 if (a->a_flags & LK_INTERLOCK) 4180 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4181 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4182#endif 4183} 4184 4185void 4186vop_unlock_post(void *ap, int rc) 4187{ 4188#ifdef DEBUG_VFS_LOCKS 4189 struct vop_unlock_args *a = ap; 4190 4191 if (a->a_flags & LK_INTERLOCK) 4192 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4193#endif 4194} 4195 4196void 4197vop_create_post(void *ap, int rc) 4198{ 4199 struct vop_create_args *a = ap; 4200 4201 if (!rc) 4202 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4203} 4204 4205void 4206vop_deleteextattr_post(void *ap, int rc) 4207{ 4208 struct vop_deleteextattr_args *a = ap; 4209 4210 if (!rc) 4211 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4212} 4213 4214void 4215vop_link_post(void *ap, int rc) 4216{ 4217 struct vop_link_args *a = ap; 4218 4219 if (!rc) { 4220 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4221 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4222 } 4223} 4224 4225void 4226vop_mkdir_post(void *ap, int rc) 4227{ 4228 struct vop_mkdir_args *a = ap; 4229 4230 if (!rc) 4231 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4232} 4233 4234void 4235vop_mknod_post(void *ap, int rc) 4236{ 4237 struct vop_mknod_args *a = ap; 4238 4239 if (!rc) 4240 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4241} 4242 4243void 4244vop_remove_post(void *ap, int rc) 4245{ 4246 struct vop_remove_args *a = ap; 4247 4248 if (!rc) { 4249 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4250 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4251 } 4252} 4253 4254void 4255vop_rename_post(void *ap, int rc) 4256{ 4257 struct vop_rename_args *a = ap; 4258 4259 if (!rc) { 4260 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 4261 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 4262 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4263 if (a->a_tvp) 4264 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4265 } 4266 if (a->a_tdvp != a->a_fdvp) 4267 vdrop(a->a_fdvp); 4268 if (a->a_tvp != a->a_fvp) 4269 vdrop(a->a_fvp); 4270 vdrop(a->a_tdvp); 4271 
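	/*
	 * These vdrop() calls undo the vhold() calls made in
	 * vop_rename_pre() under the same inequality tests, so the
	 * directories and vnodes involved stay held, and cannot be
	 * recycled, for the duration of the rename whether or not it
	 * succeeded.
	 */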
if (a->a_tvp) 4272 vdrop(a->a_tvp); 4273} 4274 4275void 4276vop_rmdir_post(void *ap, int rc) 4277{ 4278 struct vop_rmdir_args *a = ap; 4279 4280 if (!rc) { 4281 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4282 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4283 } 4284} 4285 4286void 4287vop_setattr_post(void *ap, int rc) 4288{ 4289 struct vop_setattr_args *a = ap; 4290 4291 if (!rc) 4292 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4293} 4294 4295void 4296vop_setextattr_post(void *ap, int rc) 4297{ 4298 struct vop_setextattr_args *a = ap; 4299 4300 if (!rc) 4301 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4302} 4303 4304void 4305vop_symlink_post(void *ap, int rc) 4306{ 4307 struct vop_symlink_args *a = ap; 4308 4309 if (!rc) 4310 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4311} 4312 4313static struct knlist fs_knlist; 4314 4315static void 4316vfs_event_init(void *arg) 4317{ 4318 knlist_init_mtx(&fs_knlist, NULL); 4319} 4320/* XXX - correct order? */ 4321SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4322 4323void 4324vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4325{ 4326 4327 KNOTE_UNLOCKED(&fs_knlist, event); 4328} 4329 4330static int filt_fsattach(struct knote *kn); 4331static void filt_fsdetach(struct knote *kn); 4332static int filt_fsevent(struct knote *kn, long hint); 4333 4334struct filterops fs_filtops = { 4335 .f_isfd = 0, 4336 .f_attach = filt_fsattach, 4337 .f_detach = filt_fsdetach, 4338 .f_event = filt_fsevent 4339}; 4340 4341static int 4342filt_fsattach(struct knote *kn) 4343{ 4344 4345 kn->kn_flags |= EV_CLEAR; 4346 knlist_add(&fs_knlist, kn, 0); 4347 return (0); 4348} 4349 4350static void 4351filt_fsdetach(struct knote *kn) 4352{ 4353 4354 knlist_remove(&fs_knlist, kn, 0); 4355} 4356 4357static int 4358filt_fsevent(struct knote *kn, long hint) 4359{ 4360 4361 kn->kn_fflags |= hint; 4362 return (kn->kn_fflags != 0); 4363} 4364 4365static int 4366sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4367{ 4368 struct vfsidctl vc; 4369 int error; 4370 struct mount *mp; 4371 4372 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4373 if (error) 4374 return (error); 4375 if (vc.vc_vers != VFS_CTL_VERS1) 4376 return (EINVAL); 4377 mp = vfs_getvfs(&vc.vc_fsid); 4378 if (mp == NULL) 4379 return (ENOENT); 4380 /* ensure that a specific sysctl goes to the right filesystem. */ 4381 if (strcmp(vc.vc_fstypename, "*") != 0 && 4382 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4383 vfs_rel(mp); 4384 return (EINVAL); 4385 } 4386 VCTLTOREQ(&vc, req); 4387 error = VFS_SYSCTL(mp, vc.vc_op, req); 4388 vfs_rel(mp); 4389 return (error); 4390} 4391 4392SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 4393 NULL, 0, sysctl_vfs_ctl, "", 4394 "Sysctl by fsid"); 4395 4396/* 4397 * Function to initialize a va_filerev field sensibly. 4398 * XXX: Wouldn't a random number make a lot more sense ?? 
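 *
 * (Clarifying note, not part of the original comment: the value packs
 * the boot-relative bintime as "seconds << 32 | top 32 bits of the
 * fraction", so later calls never return a smaller value and the
 * counter advances in steps of roughly 1/2^32 of a second.)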
4399 */ 4400u_quad_t 4401init_va_filerev(void) 4402{ 4403 struct bintime bt; 4404 4405 getbinuptime(&bt); 4406 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4407} 4408 4409static int filt_vfsread(struct knote *kn, long hint); 4410static int filt_vfswrite(struct knote *kn, long hint); 4411static int filt_vfsvnode(struct knote *kn, long hint); 4412static void filt_vfsdetach(struct knote *kn); 4413static struct filterops vfsread_filtops = { 4414 .f_isfd = 1, 4415 .f_detach = filt_vfsdetach, 4416 .f_event = filt_vfsread 4417}; 4418static struct filterops vfswrite_filtops = { 4419 .f_isfd = 1, 4420 .f_detach = filt_vfsdetach, 4421 .f_event = filt_vfswrite 4422}; 4423static struct filterops vfsvnode_filtops = { 4424 .f_isfd = 1, 4425 .f_detach = filt_vfsdetach, 4426 .f_event = filt_vfsvnode 4427}; 4428 4429static void 4430vfs_knllock(void *arg) 4431{ 4432 struct vnode *vp = arg; 4433 4434 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4435} 4436 4437static void 4438vfs_knlunlock(void *arg) 4439{ 4440 struct vnode *vp = arg; 4441 4442 VOP_UNLOCK(vp, 0); 4443} 4444 4445static void 4446vfs_knl_assert_locked(void *arg) 4447{ 4448#ifdef DEBUG_VFS_LOCKS 4449 struct vnode *vp = arg; 4450 4451 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 4452#endif 4453} 4454 4455static void 4456vfs_knl_assert_unlocked(void *arg) 4457{ 4458#ifdef DEBUG_VFS_LOCKS 4459 struct vnode *vp = arg; 4460 4461 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 4462#endif 4463} 4464 4465int 4466vfs_kqfilter(struct vop_kqfilter_args *ap) 4467{ 4468 struct vnode *vp = ap->a_vp; 4469 struct knote *kn = ap->a_kn; 4470 struct knlist *knl; 4471 4472 switch (kn->kn_filter) { 4473 case EVFILT_READ: 4474 kn->kn_fop = &vfsread_filtops; 4475 break; 4476 case EVFILT_WRITE: 4477 kn->kn_fop = &vfswrite_filtops; 4478 break; 4479 case EVFILT_VNODE: 4480 kn->kn_fop = &vfsvnode_filtops; 4481 break; 4482 default: 4483 return (EINVAL); 4484 } 4485 4486 kn->kn_hook = (caddr_t)vp; 4487 4488 v_addpollinfo(vp); 4489 if (vp->v_pollinfo == NULL) 4490 return (ENOMEM); 4491 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 4492 vhold(vp); 4493 knlist_add(knl, kn, 0); 4494 4495 return (0); 4496} 4497 4498/* 4499 * Detach knote from vnode 4500 */ 4501static void 4502filt_vfsdetach(struct knote *kn) 4503{ 4504 struct vnode *vp = (struct vnode *)kn->kn_hook; 4505 4506 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 4507 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 4508 vdrop(vp); 4509} 4510 4511/*ARGSUSED*/ 4512static int 4513filt_vfsread(struct knote *kn, long hint) 4514{ 4515 struct vnode *vp = (struct vnode *)kn->kn_hook; 4516 struct vattr va; 4517 int res; 4518 4519 /* 4520 * filesystem is gone, so set the EOF flag and schedule 4521 * the knote for deletion. 4522 */ 4523 if (hint == NOTE_REVOKE) { 4524 VI_LOCK(vp); 4525 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4526 VI_UNLOCK(vp); 4527 return (1); 4528 } 4529 4530 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 4531 return (0); 4532 4533 VI_LOCK(vp); 4534 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 4535 res = (kn->kn_data != 0); 4536 VI_UNLOCK(vp); 4537 return (res); 4538} 4539 4540/*ARGSUSED*/ 4541static int 4542filt_vfswrite(struct knote *kn, long hint) 4543{ 4544 struct vnode *vp = (struct vnode *)kn->kn_hook; 4545 4546 VI_LOCK(vp); 4547 4548 /* 4549 * filesystem is gone, so set the EOF flag and schedule 4550 * the knote for deletion. 
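 * (Added note, derived from the code below rather than from the
 * original comment: unlike filt_vfsread() above, this filter always
 * reports the vnode as writable, kn_data is simply reset to zero and
 * the event fires unconditionally; a NOTE_REVOKE hint only adds
 * EV_EOF and EV_ONESHOT along the way.)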
4551 */ 4552 if (hint == NOTE_REVOKE) 4553 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4554 4555 kn->kn_data = 0; 4556 VI_UNLOCK(vp); 4557 return (1); 4558} 4559 4560static int 4561filt_vfsvnode(struct knote *kn, long hint) 4562{ 4563 struct vnode *vp = (struct vnode *)kn->kn_hook; 4564 int res; 4565 4566 VI_LOCK(vp); 4567 if (kn->kn_sfflags & hint) 4568 kn->kn_fflags |= hint; 4569 if (hint == NOTE_REVOKE) { 4570 kn->kn_flags |= EV_EOF; 4571 VI_UNLOCK(vp); 4572 return (1); 4573 } 4574 res = (kn->kn_fflags != 0); 4575 VI_UNLOCK(vp); 4576 return (res); 4577} 4578 4579int 4580vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 4581{ 4582 int error; 4583 4584 if (dp->d_reclen > ap->a_uio->uio_resid) 4585 return (ENAMETOOLONG); 4586 error = uiomove(dp, dp->d_reclen, ap->a_uio); 4587 if (error) { 4588 if (ap->a_ncookies != NULL) { 4589 if (ap->a_cookies != NULL) 4590 free(ap->a_cookies, M_TEMP); 4591 ap->a_cookies = NULL; 4592 *ap->a_ncookies = 0; 4593 } 4594 return (error); 4595 } 4596 if (ap->a_ncookies == NULL) 4597 return (0); 4598 4599 KASSERT(ap->a_cookies, 4600 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 4601 4602 *ap->a_cookies = realloc(*ap->a_cookies, 4603 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 4604 (*ap->a_cookies)[*ap->a_ncookies] = off; 4605 return (0); 4606} 4607 4608/* 4609 * Mark for update the access time of the file if the filesystem 4610 * supports VOP_MARKATIME. This functionality is used by execve and 4611 * mmap, so we want to avoid the I/O implied by directly setting 4612 * va_atime for the sake of efficiency. 4613 */ 4614void 4615vfs_mark_atime(struct vnode *vp, struct ucred *cred) 4616{ 4617 struct mount *mp; 4618 4619 mp = vp->v_mount; 4620 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 4621 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 4622 (void)VOP_MARKATIME(vp); 4623} 4624 4625/* 4626 * The purpose of this routine is to remove granularity from accmode_t, 4627 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 4628 * VADMIN and VAPPEND. 4629 * 4630 * If it returns 0, the caller is supposed to continue with the usual 4631 * access checks using 'accmode' as modified by this routine. If it 4632 * returns nonzero value, the caller is supposed to return that value 4633 * as errno. 4634 * 4635 * Note that after this routine runs, accmode may be zero. 4636 */ 4637int 4638vfs_unixify_accmode(accmode_t *accmode) 4639{ 4640 /* 4641 * There is no way to specify explicit "deny" rule using 4642 * file mode or POSIX.1e ACLs. 4643 */ 4644 if (*accmode & VEXPLICIT_DENY) { 4645 *accmode = 0; 4646 return (0); 4647 } 4648 4649 /* 4650 * None of these can be translated into usual access bits. 4651 * Also, the common case for NFSv4 ACLs is to not contain 4652 * either of these bits. Caller should check for VWRITE 4653 * on the containing directory instead. 4654 */ 4655 if (*accmode & (VDELETE_CHILD | VDELETE)) 4656 return (EPERM); 4657 4658 if (*accmode & VADMIN_PERMS) { 4659 *accmode &= ~VADMIN_PERMS; 4660 *accmode |= VADMIN; 4661 } 4662 4663 /* 4664 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 4665 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 4666 */ 4667 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 4668 4669 return (0); 4670} 4671 4672/* 4673 * These are helper functions for filesystems to traverse all 4674 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 4675 * 4676 * This interface replaces MNT_VNODE_FOREACH. 
4677 */ 4678 4679MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 4680 4681struct vnode * 4682__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 4683{ 4684 struct vnode *vp; 4685 4686 if (should_yield()) 4687 kern_yield(PRI_USER); 4688 MNT_ILOCK(mp); 4689 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4690 vp = TAILQ_NEXT(*mvp, v_nmntvnodes); 4691 while (vp != NULL && (vp->v_type == VMARKER || 4692 (vp->v_iflag & VI_DOOMED) != 0)) 4693 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4694 4695 /* Check if we are done */ 4696 if (vp == NULL) { 4697 __mnt_vnode_markerfree_all(mvp, mp); 4698 /* MNT_IUNLOCK(mp); -- done in above function */ 4699 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 4700 return (NULL); 4701 } 4702 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4703 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4704 VI_LOCK(vp); 4705 MNT_IUNLOCK(mp); 4706 return (vp); 4707} 4708 4709struct vnode * 4710__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 4711{ 4712 struct vnode *vp; 4713 4714 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4715 MNT_ILOCK(mp); 4716 MNT_REF(mp); 4717 (*mvp)->v_type = VMARKER; 4718 4719 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 4720 while (vp != NULL && (vp->v_type == VMARKER || 4721 (vp->v_iflag & VI_DOOMED) != 0)) 4722 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4723 4724 /* Check if we are done */ 4725 if (vp == NULL) { 4726 MNT_REL(mp); 4727 MNT_IUNLOCK(mp); 4728 free(*mvp, M_VNODE_MARKER); 4729 *mvp = NULL; 4730 return (NULL); 4731 } 4732 (*mvp)->v_mount = mp; 4733 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4734 VI_LOCK(vp); 4735 MNT_IUNLOCK(mp); 4736 return (vp); 4737} 4738 4739 4740void 4741__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 4742{ 4743 4744 if (*mvp == NULL) { 4745 MNT_IUNLOCK(mp); 4746 return; 4747 } 4748 4749 mtx_assert(MNT_MTX(mp), MA_OWNED); 4750 4751 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4752 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4753 MNT_REL(mp); 4754 MNT_IUNLOCK(mp); 4755 free(*mvp, M_VNODE_MARKER); 4756 *mvp = NULL; 4757} 4758 4759/* 4760 * These are helper functions for filesystems to traverse their 4761 * active vnodes. 
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 4762 */ 4763static void 4764mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4765{ 4766 4767 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4768 4769 MNT_ILOCK(mp); 4770 MNT_REL(mp); 4771 MNT_IUNLOCK(mp); 4772 free(*mvp, M_VNODE_MARKER); 4773 *mvp = NULL; 4774} 4775 4776static struct vnode * 4777mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4778{ 4779 struct vnode *vp, *nvp; 4780 4781 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 4782 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4783restart: 4784 vp = TAILQ_NEXT(*mvp, v_actfreelist); 4785 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4786 while (vp != NULL) { 4787 if (vp->v_type == VMARKER) { 4788 vp = TAILQ_NEXT(vp, v_actfreelist); 4789 continue; 4790 } 4791 if (!VI_TRYLOCK(vp)) { 4792 if (mp_ncpus == 1 || should_yield()) { 4793 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4794 mtx_unlock(&vnode_free_list_mtx); 4795 pause("vnacti", 1); 4796 mtx_lock(&vnode_free_list_mtx); 4797 goto restart; 4798 } 4799 continue; 4800 } 4801 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 4802 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 4803 ("alien vnode on the active list %p %p", vp, mp)); 4804 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 4805 break; 4806 nvp = TAILQ_NEXT(vp, v_actfreelist); 4807 VI_UNLOCK(vp); 4808 vp = nvp; 4809 } 4810 4811 /* Check if we are done */ 4812 if (vp == NULL) { 4813 mtx_unlock(&vnode_free_list_mtx); 4814 mnt_vnode_markerfree_active(mvp, mp); 4815 return (NULL); 4816 } 4817 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 4818 mtx_unlock(&vnode_free_list_mtx); 4819 ASSERT_VI_LOCKED(vp, "active iter"); 4820 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 4821 return (vp); 4822} 4823 4824struct vnode * 4825__mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4826{ 4827 4828 if (should_yield()) 4829 kern_yield(PRI_USER); 4830 mtx_lock(&vnode_free_list_mtx); 4831 return (mnt_vnode_next_active(mvp, mp)); 4832} 4833 4834struct vnode * 4835__mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 4836{ 4837 struct vnode *vp; 4838 4839 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4840 MNT_ILOCK(mp); 4841 MNT_REF(mp); 4842 MNT_IUNLOCK(mp); 4843 (*mvp)->v_type = VMARKER; 4844 (*mvp)->v_mount = mp; 4845 4846 mtx_lock(&vnode_free_list_mtx); 4847 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 4848 if (vp == NULL) { 4849 mtx_unlock(&vnode_free_list_mtx); 4850 mnt_vnode_markerfree_active(mvp, mp); 4851 return (NULL); 4852 } 4853 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4854 return (mnt_vnode_next_active(mvp, mp)); 4855} 4856 4857void 4858__mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4859{ 4860 4861 if (*mvp == NULL) 4862 return; 4863 4864 mtx_lock(&vnode_free_list_mtx); 4865 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4866 mtx_unlock(&vnode_free_list_mtx); 4867 mnt_vnode_markerfree_active(mvp, mp); 4868} 4869
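/*
 * Usage sketch for the active-vnode iterator above; this function is
 * illustrative only and not part of the original file.  The macros,
 * helper names and locking contract (each vnode comes back with its
 * interlock held, not doomed, and belonging to mp) are taken from the
 * code above, while the filter and the per-vnode work are placeholders.
 * A caller that breaks out of the loop early would also have to use
 * MNT_VNODE_FOREACH_ACTIVE_ABORT(), which ends up in
 * __mnt_vnode_markerfree_active() above, so the marker is removed.
 */
static void
example_traverse_active(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type != VREG) {	/* hypothetical filter */
			VI_UNLOCK(vp);
			continue;
		}
		/* ... per-vnode work for the hypothetical caller ... */
		VI_UNLOCK(vp);
	}
}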