ffs_vfsops.c revision 309208
/*-
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_vfsops.c 309208 2016-11-27 09:14:52Z kib $");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", NULL };

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	pid_t fsckpid = 0;
	int error, error1, flags;
	uint64_t mntorflags;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
	    vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
		/*
		 * Once we have set the restricted PID, do not
		 * persist "fsckpid" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "fsckpid");
		vfs_deleteopt(mp->mnt_opt, "fsckpid");
		if (mp->mnt_flag & MNT_UPDATE) {
			if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
			    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
				vfs_mount_error(mp,
				    "Checker enable: Must be read-only");
				return (EINVAL);
			}
		} else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
			vfs_mount_error(mp,
			    "Checker enable: Must be read-only");
			return (EINVAL);
		}
		/* Set to -1 if we are done */
		if (fsckpid == 0)
			fsckpid = -1;
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		if (fsckpid == -1 && ump->um_fsckpid > 0) {
			if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
			    (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
				return (error);
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Return to normal read-only mode.
			 */
			error = g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			ump->um_fsckpid = 0;
		}
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (MOUNTEDSOFTDEP(mp)) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (MOUNTEDSOFTDEP(mp))
				softdep_unmount(mp);
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			PICKUP_GIANT();
			fs->fs_ronly = 1;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, td, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If we are running a checker, do not allow upgrade.
322 */ 323 if (ump->um_fsckpid > 0) { 324 vfs_mount_error(mp, 325 "Active checker, cannot upgrade to write"); 326 return (EINVAL); 327 } 328 /* 329 * If upgrade to read-write by non-root, then verify 330 * that user has necessary permissions on the device. 331 */ 332 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 333 error = VOP_ACCESS(devvp, VREAD | VWRITE, 334 td->td_ucred, td); 335 if (error) 336 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 337 if (error) { 338 VOP_UNLOCK(devvp, 0); 339 return (error); 340 } 341 VOP_UNLOCK(devvp, 0); 342 fs->fs_flags &= ~FS_UNCLEAN; 343 if (fs->fs_clean == 0) { 344 fs->fs_flags |= FS_UNCLEAN; 345 if ((mp->mnt_flag & MNT_FORCE) || 346 ((fs->fs_flags & 347 (FS_SUJ | FS_NEEDSFSCK)) == 0 && 348 (fs->fs_flags & FS_DOSOFTDEP))) { 349 printf("WARNING: %s was not properly " 350 "dismounted\n", fs->fs_fsmnt); 351 } else { 352 vfs_mount_error(mp, 353 "R/W mount of %s denied. %s.%s", 354 fs->fs_fsmnt, 355 "Filesystem is not clean - run fsck", 356 (fs->fs_flags & FS_SUJ) == 0 ? "" : 357 " Forced mount will invalidate" 358 " journal contents"); 359 return (EPERM); 360 } 361 } 362 DROP_GIANT(); 363 g_topology_lock(); 364 /* 365 * Request exclusive write access. 366 */ 367 error = g_access(ump->um_cp, 0, 1, 1); 368 g_topology_unlock(); 369 PICKUP_GIANT(); 370 if (error) 371 return (error); 372 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 373 return (error); 374 fs->fs_ronly = 0; 375 MNT_ILOCK(mp); 376 mp->mnt_flag &= ~MNT_RDONLY; 377 MNT_IUNLOCK(mp); 378 fs->fs_mtime = time_second; 379 /* check to see if we need to start softdep */ 380 if ((fs->fs_flags & FS_DOSOFTDEP) && 381 (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ 382 vn_finished_write(mp); 383 return (error); 384 } 385 fs->fs_clean = 0; 386 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 387 vn_finished_write(mp); 388 return (error); 389 } 390 if (fs->fs_snapinum[0] != 0) 391 ffs_snapshot_mount(mp); 392 vn_finished_write(mp); 393 } 394 /* 395 * Soft updates is incompatible with "async", 396 * so if we are doing softupdates stop the user 397 * from setting the async flag in an update. 398 * Softdep_mount() clears it in an initial mount 399 * or ro->rw remount. 400 */ 401 if (MOUNTEDSOFTDEP(mp)) { 402 /* XXX: Reset too late ? */ 403 MNT_ILOCK(mp); 404 mp->mnt_flag &= ~MNT_ASYNC; 405 MNT_IUNLOCK(mp); 406 } 407 /* 408 * Keep MNT_ACLS flag if it is stored in superblock. 409 */ 410 if ((fs->fs_flags & FS_ACLS) != 0) { 411 /* XXX: Set too late ? */ 412 MNT_ILOCK(mp); 413 mp->mnt_flag |= MNT_ACLS; 414 MNT_IUNLOCK(mp); 415 } 416 417 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 418 /* XXX: Set too late ? */ 419 MNT_ILOCK(mp); 420 mp->mnt_flag |= MNT_NFS4ACLS; 421 MNT_IUNLOCK(mp); 422 } 423 /* 424 * If this is a request from fsck to clean up the filesystem, 425 * then allow the specified pid to proceed. 426 */ 427 if (fsckpid > 0) { 428 if (ump->um_fsckpid != 0) { 429 vfs_mount_error(mp, 430 "Active checker already running on %s", 431 fs->fs_fsmnt); 432 return (EINVAL); 433 } 434 KASSERT(MOUNTEDSOFTDEP(mp) == 0, 435 ("soft updates enabled on read-only file system")); 436 DROP_GIANT(); 437 g_topology_lock(); 438 /* 439 * Request write access. 
440 */ 441 error = g_access(ump->um_cp, 0, 1, 0); 442 g_topology_unlock(); 443 PICKUP_GIANT(); 444 if (error) { 445 vfs_mount_error(mp, 446 "Checker activation failed on %s", 447 fs->fs_fsmnt); 448 return (error); 449 } 450 ump->um_fsckpid = fsckpid; 451 if (fs->fs_snapinum[0] != 0) 452 ffs_snapshot_mount(mp); 453 fs->fs_mtime = time_second; 454 fs->fs_fmod = 1; 455 fs->fs_clean = 0; 456 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 457 } 458 459 /* 460 * If this is a snapshot request, take the snapshot. 461 */ 462 if (mp->mnt_flag & MNT_SNAPSHOT) 463 return (ffs_snapshot(mp, fspec)); 464 465 /* 466 * Must not call namei() while owning busy ref. 467 */ 468 vfs_unbusy(mp); 469 } 470 471 /* 472 * Not an update, or updating the name: look up the name 473 * and verify that it refers to a sensible disk device. 474 */ 475 NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); 476 error = namei(&ndp); 477 if ((mp->mnt_flag & MNT_UPDATE) != 0) { 478 /* 479 * Unmount does not start if MNT_UPDATE is set. Mount 480 * update busies mp before setting MNT_UPDATE. We 481 * must be able to retain our busy ref succesfully, 482 * without sleep. 483 */ 484 error1 = vfs_busy(mp, MBF_NOWAIT); 485 MPASS(error1 == 0); 486 } 487 if (error != 0) 488 return (error); 489 NDFREE(&ndp, NDF_ONLY_PNBUF); 490 devvp = ndp.ni_vp; 491 if (!vn_isdisk(devvp, &error)) { 492 vput(devvp); 493 return (error); 494 } 495 496 /* 497 * If mount by non-root, then verify that user has necessary 498 * permissions on the device. 499 */ 500 accmode = VREAD; 501 if ((mp->mnt_flag & MNT_RDONLY) == 0) 502 accmode |= VWRITE; 503 error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); 504 if (error) 505 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 506 if (error) { 507 vput(devvp); 508 return (error); 509 } 510 511 if (mp->mnt_flag & MNT_UPDATE) { 512 /* 513 * Update only 514 * 515 * If it's not the same vnode, or at least the same device 516 * then it's not correct. 517 */ 518 519 if (devvp->v_rdev != ump->um_devvp->v_rdev) 520 error = EINVAL; /* needs translation */ 521 vput(devvp); 522 if (error) 523 return (error); 524 } else { 525 /* 526 * New mount 527 * 528 * We need the name for the mount point (also used for 529 * "last mounted on") copied in. If an error occurs, 530 * the mount point is discarded by the upper level code. 531 * Note that vfs_mount_alloc() populates f_mntonname for us. 532 */ 533 if ((error = ffs_mountfs(devvp, mp, td)) != 0) { 534 vrele(devvp); 535 return (error); 536 } 537 if (fsckpid > 0) { 538 KASSERT(MOUNTEDSOFTDEP(mp) == 0, 539 ("soft updates enabled on read-only file system")); 540 ump = VFSTOUFS(mp); 541 fs = ump->um_fs; 542 DROP_GIANT(); 543 g_topology_lock(); 544 /* 545 * Request write access. 546 */ 547 error = g_access(ump->um_cp, 0, 1, 0); 548 g_topology_unlock(); 549 PICKUP_GIANT(); 550 if (error) { 551 printf("WARNING: %s: Checker activation " 552 "failed\n", fs->fs_fsmnt); 553 } else { 554 ump->um_fsckpid = fsckpid; 555 if (fs->fs_snapinum[0] != 0) 556 ffs_snapshot_mount(mp); 557 fs->fs_mtime = time_second; 558 fs->fs_clean = 0; 559 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 560 } 561 } 562 } 563 vfs_mountedfrom(mp, fspec); 564 return (0); 565} 566 567/* 568 * Compatibility with old mount system call. 
569 */ 570 571static int 572ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) 573{ 574 struct ufs_args args; 575 struct export_args exp; 576 int error; 577 578 if (data == NULL) 579 return (EINVAL); 580 error = copyin(data, &args, sizeof args); 581 if (error) 582 return (error); 583 vfs_oexport_conv(&args.export, &exp); 584 585 ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); 586 ma = mount_arg(ma, "export", &exp, sizeof(exp)); 587 error = kernel_mount(ma, flags); 588 589 return (error); 590} 591 592/* 593 * Reload all incore data for a filesystem (used after running fsck on 594 * the root filesystem and finding things to fix). If the 'force' flag 595 * is 0, the filesystem must be mounted read-only. 596 * 597 * Things to do to update the mount: 598 * 1) invalidate all cached meta-data. 599 * 2) re-read superblock from disk. 600 * 3) re-read summary information from disk. 601 * 4) invalidate all inactive vnodes. 602 * 5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary 603 * writers, if requested. 604 * 6) invalidate all cached file data. 605 * 7) re-read inode data for all active vnodes. 606 */ 607int 608ffs_reload(struct mount *mp, struct thread *td, int flags) 609{ 610 struct vnode *vp, *mvp, *devvp; 611 struct inode *ip; 612 void *space; 613 struct buf *bp; 614 struct fs *fs, *newfs; 615 struct ufsmount *ump; 616 ufs2_daddr_t sblockloc; 617 int i, blks, error; 618 u_long size; 619 int32_t *lp; 620 621 ump = VFSTOUFS(mp); 622 623 MNT_ILOCK(mp); 624 if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) { 625 MNT_IUNLOCK(mp); 626 return (EINVAL); 627 } 628 MNT_IUNLOCK(mp); 629 630 /* 631 * Step 1: invalidate all cached meta-data. 632 */ 633 devvp = VFSTOUFS(mp)->um_devvp; 634 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 635 if (vinvalbuf(devvp, 0, 0, 0) != 0) 636 panic("ffs_reload: dirty1"); 637 VOP_UNLOCK(devvp, 0); 638 639 /* 640 * Step 2: re-read superblock from disk. 641 */ 642 fs = VFSTOUFS(mp)->um_fs; 643 if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, 644 NOCRED, &bp)) != 0) 645 return (error); 646 newfs = (struct fs *)bp->b_data; 647 if ((newfs->fs_magic != FS_UFS1_MAGIC && 648 newfs->fs_magic != FS_UFS2_MAGIC) || 649 newfs->fs_bsize > MAXBSIZE || 650 newfs->fs_bsize < sizeof(struct fs)) { 651 brelse(bp); 652 return (EIO); /* XXX needs translation */ 653 } 654 /* 655 * Copy pointer fields back into superblock before copying in XXX 656 * new superblock. These should really be in the ufsmount. XXX 657 * Note that important parameters (eg fs_ncg) are unchanged. 658 */ 659 newfs->fs_csp = fs->fs_csp; 660 newfs->fs_maxcluster = fs->fs_maxcluster; 661 newfs->fs_contigdirs = fs->fs_contigdirs; 662 newfs->fs_active = fs->fs_active; 663 newfs->fs_ronly = fs->fs_ronly; 664 sblockloc = fs->fs_sblockloc; 665 bcopy(newfs, fs, (u_int)fs->fs_sbsize); 666 brelse(bp); 667 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 668 ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); 669 UFS_LOCK(ump); 670 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 671 printf("WARNING: %s: reload pending error: blocks %jd " 672 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 673 fs->fs_pendinginodes); 674 fs->fs_pendingblocks = 0; 675 fs->fs_pendinginodes = 0; 676 } 677 UFS_UNLOCK(ump); 678 679 /* 680 * Step 3: re-read summary information from disk. 
681 */ 682 size = fs->fs_cssize; 683 blks = howmany(size, fs->fs_fsize); 684 if (fs->fs_contigsumsize > 0) 685 size += fs->fs_ncg * sizeof(int32_t); 686 size += fs->fs_ncg * sizeof(u_int8_t); 687 free(fs->fs_csp, M_UFSMNT); 688 space = malloc(size, M_UFSMNT, M_WAITOK); 689 fs->fs_csp = space; 690 for (i = 0; i < blks; i += fs->fs_frag) { 691 size = fs->fs_bsize; 692 if (i + fs->fs_frag > blks) 693 size = (blks - i) * fs->fs_fsize; 694 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 695 NOCRED, &bp); 696 if (error) 697 return (error); 698 bcopy(bp->b_data, space, (u_int)size); 699 space = (char *)space + size; 700 brelse(bp); 701 } 702 /* 703 * We no longer know anything about clusters per cylinder group. 704 */ 705 if (fs->fs_contigsumsize > 0) { 706 fs->fs_maxcluster = lp = space; 707 for (i = 0; i < fs->fs_ncg; i++) 708 *lp++ = fs->fs_contigsumsize; 709 space = lp; 710 } 711 size = fs->fs_ncg * sizeof(u_int8_t); 712 fs->fs_contigdirs = (u_int8_t *)space; 713 bzero(fs->fs_contigdirs, size); 714 if ((flags & FFSR_UNSUSPEND) != 0) { 715 MNT_ILOCK(mp); 716 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); 717 wakeup(&mp->mnt_flag); 718 MNT_IUNLOCK(mp); 719 } 720 721loop: 722 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 723 /* 724 * Skip syncer vnode. 725 */ 726 if (vp->v_type == VNON) { 727 VI_UNLOCK(vp); 728 continue; 729 } 730 /* 731 * Step 4: invalidate all cached file data. 732 */ 733 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { 734 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 735 goto loop; 736 } 737 if (vinvalbuf(vp, 0, 0, 0)) 738 panic("ffs_reload: dirty2"); 739 /* 740 * Step 5: re-read inode data for all active vnodes. 741 */ 742 ip = VTOI(vp); 743 error = 744 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 745 (int)fs->fs_bsize, NOCRED, &bp); 746 if (error) { 747 VOP_UNLOCK(vp, 0); 748 vrele(vp); 749 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 750 return (error); 751 } 752 ffs_load_inode(bp, ip, fs, ip->i_number); 753 ip->i_effnlink = ip->i_nlink; 754 brelse(bp); 755 VOP_UNLOCK(vp, 0); 756 vrele(vp); 757 } 758 return (0); 759} 760 761/* 762 * Possible superblock locations ordered from most to least likely. 763 */ 764static int sblock_try[] = SBLOCKSEARCH; 765 766/* 767 * Common code for mount and mountroot 768 */ 769static int 770ffs_mountfs(devvp, mp, td) 771 struct vnode *devvp; 772 struct mount *mp; 773 struct thread *td; 774{ 775 struct ufsmount *ump; 776 struct buf *bp; 777 struct fs *fs; 778 struct cdev *dev; 779 void *space; 780 ufs2_daddr_t sblockloc; 781 int error, i, blks, len, ronly; 782 u_long size; 783 int32_t *lp; 784 struct ucred *cred; 785 struct g_consumer *cp; 786 struct mount *nmp; 787 788 bp = NULL; 789 ump = NULL; 790 cred = td ? td->td_ucred : NOCRED; 791 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 792 793 KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); 794 dev = devvp->v_rdev; 795 if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, 796 (uintptr_t)mp) == 0) { 797 VOP_UNLOCK(devvp, 0); 798 return (EBUSY); 799 } 800 DROP_GIANT(); 801 g_topology_lock(); 802 error = g_vfs_open(devvp, &cp, "ffs", ronly ? 
0 : 1); 803 g_topology_unlock(); 804 PICKUP_GIANT(); 805 if (error != 0) { 806 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 807 VOP_UNLOCK(devvp, 0); 808 return (error); 809 } 810 dev_ref(dev); 811 devvp->v_bufobj.bo_ops = &ffs_ops; 812 VOP_UNLOCK(devvp, 0); 813 if (dev->si_iosize_max != 0) 814 mp->mnt_iosize_max = dev->si_iosize_max; 815 if (mp->mnt_iosize_max > MAXPHYS) 816 mp->mnt_iosize_max = MAXPHYS; 817 818 fs = NULL; 819 sblockloc = 0; 820 /* 821 * Try reading the superblock in each of its possible locations. 822 */ 823 for (i = 0; sblock_try[i] != -1; i++) { 824 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { 825 error = EINVAL; 826 vfs_mount_error(mp, 827 "Invalid sectorsize %d for superblock size %d", 828 cp->provider->sectorsize, SBLOCKSIZE); 829 goto out; 830 } 831 if ((error = bread(devvp, btodb(sblock_try[i]), SBLOCKSIZE, 832 cred, &bp)) != 0) 833 goto out; 834 fs = (struct fs *)bp->b_data; 835 sblockloc = sblock_try[i]; 836 if ((fs->fs_magic == FS_UFS1_MAGIC || 837 (fs->fs_magic == FS_UFS2_MAGIC && 838 (fs->fs_sblockloc == sblockloc || 839 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) && 840 fs->fs_bsize <= MAXBSIZE && 841 fs->fs_bsize >= sizeof(struct fs)) 842 break; 843 brelse(bp); 844 bp = NULL; 845 } 846 if (sblock_try[i] == -1) { 847 error = EINVAL; /* XXX needs translation */ 848 goto out; 849 } 850 fs->fs_fmod = 0; 851 fs->fs_flags &= ~FS_INDEXDIRS; /* no support for directory indicies */ 852 fs->fs_flags &= ~FS_UNCLEAN; 853 if (fs->fs_clean == 0) { 854 fs->fs_flags |= FS_UNCLEAN; 855 if (ronly || (mp->mnt_flag & MNT_FORCE) || 856 ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && 857 (fs->fs_flags & FS_DOSOFTDEP))) { 858 printf("WARNING: %s was not properly dismounted\n", 859 fs->fs_fsmnt); 860 } else { 861 vfs_mount_error(mp, "R/W mount of %s denied. %s%s", 862 fs->fs_fsmnt, "Filesystem is not clean - run fsck.", 863 (fs->fs_flags & FS_SUJ) == 0 ? "" : 864 " Forced mount will invalidate journal contents"); 865 error = EPERM; 866 goto out; 867 } 868 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 869 (mp->mnt_flag & MNT_FORCE)) { 870 printf("WARNING: %s: lost blocks %jd files %d\n", 871 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 872 fs->fs_pendinginodes); 873 fs->fs_pendingblocks = 0; 874 fs->fs_pendinginodes = 0; 875 } 876 } 877 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 878 printf("WARNING: %s: mount pending error: blocks %jd " 879 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 880 fs->fs_pendinginodes); 881 fs->fs_pendingblocks = 0; 882 fs->fs_pendinginodes = 0; 883 } 884 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 885#ifdef UFS_GJOURNAL 886 /* 887 * Get journal provider name. 
888 */ 889 len = 1024; 890 mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK); 891 if (g_io_getattr("GJOURNAL::provider", cp, &len, 892 mp->mnt_gjprovider) == 0) { 893 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len, 894 M_UFSMNT, M_WAITOK); 895 MNT_ILOCK(mp); 896 mp->mnt_flag |= MNT_GJOURNAL; 897 MNT_IUNLOCK(mp); 898 } else { 899 printf("WARNING: %s: GJOURNAL flag on fs " 900 "but no gjournal provider below\n", 901 mp->mnt_stat.f_mntonname); 902 free(mp->mnt_gjprovider, M_UFSMNT); 903 mp->mnt_gjprovider = NULL; 904 } 905#else 906 printf("WARNING: %s: GJOURNAL flag on fs but no " 907 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 908#endif 909 } else { 910 mp->mnt_gjprovider = NULL; 911 } 912 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 913 ump->um_cp = cp; 914 ump->um_bo = &devvp->v_bufobj; 915 ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, M_WAITOK); 916 if (fs->fs_magic == FS_UFS1_MAGIC) { 917 ump->um_fstype = UFS1; 918 ump->um_balloc = ffs_balloc_ufs1; 919 } else { 920 ump->um_fstype = UFS2; 921 ump->um_balloc = ffs_balloc_ufs2; 922 } 923 ump->um_blkatoff = ffs_blkatoff; 924 ump->um_truncate = ffs_truncate; 925 ump->um_update = ffs_update; 926 ump->um_valloc = ffs_valloc; 927 ump->um_vfree = ffs_vfree; 928 ump->um_ifree = ffs_ifree; 929 ump->um_rdonly = ffs_rdonly; 930 ump->um_snapgone = ffs_snapgone; 931 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 932 bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize); 933 if (fs->fs_sbsize < SBLOCKSIZE) 934 bp->b_flags |= B_INVAL | B_NOCACHE; 935 brelse(bp); 936 bp = NULL; 937 fs = ump->um_fs; 938 ffs_oldfscompat_read(fs, ump, sblockloc); 939 fs->fs_ronly = ronly; 940 size = fs->fs_cssize; 941 blks = howmany(size, fs->fs_fsize); 942 if (fs->fs_contigsumsize > 0) 943 size += fs->fs_ncg * sizeof(int32_t); 944 size += fs->fs_ncg * sizeof(u_int8_t); 945 space = malloc(size, M_UFSMNT, M_WAITOK); 946 fs->fs_csp = space; 947 for (i = 0; i < blks; i += fs->fs_frag) { 948 size = fs->fs_bsize; 949 if (i + fs->fs_frag > blks) 950 size = (blks - i) * fs->fs_fsize; 951 if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 952 cred, &bp)) != 0) { 953 free(fs->fs_csp, M_UFSMNT); 954 goto out; 955 } 956 bcopy(bp->b_data, space, (u_int)size); 957 space = (char *)space + size; 958 brelse(bp); 959 bp = NULL; 960 } 961 if (fs->fs_contigsumsize > 0) { 962 fs->fs_maxcluster = lp = space; 963 for (i = 0; i < fs->fs_ncg; i++) 964 *lp++ = fs->fs_contigsumsize; 965 space = lp; 966 } 967 size = fs->fs_ncg * sizeof(u_int8_t); 968 fs->fs_contigdirs = (u_int8_t *)space; 969 bzero(fs->fs_contigdirs, size); 970 fs->fs_active = NULL; 971 mp->mnt_data = ump; 972 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 973 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 974 nmp = NULL; 975 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 976 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 977 if (nmp) 978 vfs_rel(nmp); 979 vfs_getnewfsid(mp); 980 } 981 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 982 MNT_ILOCK(mp); 983 mp->mnt_flag |= MNT_LOCAL; 984 MNT_IUNLOCK(mp); 985 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 986#ifdef MAC 987 MNT_ILOCK(mp); 988 mp->mnt_flag |= MNT_MULTILABEL; 989 MNT_IUNLOCK(mp); 990#else 991 printf("WARNING: %s: multilabel flag on fs but " 992 "no MAC support\n", mp->mnt_stat.f_mntonname); 993#endif 994 } 995 if ((fs->fs_flags & FS_ACLS) != 0) { 996#ifdef UFS_ACL 997 MNT_ILOCK(mp); 998 999 if (mp->mnt_flag & MNT_NFS4ACLS) 1000 printf("WARNING: %s: ACLs flag on fs conflicts with " 1001 "\"nfsv4acls\" mount option; option ignored\n", 
1002 mp->mnt_stat.f_mntonname); 1003 mp->mnt_flag &= ~MNT_NFS4ACLS; 1004 mp->mnt_flag |= MNT_ACLS; 1005 1006 MNT_IUNLOCK(mp); 1007#else 1008 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 1009 mp->mnt_stat.f_mntonname); 1010#endif 1011 } 1012 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 1013#ifdef UFS_ACL 1014 MNT_ILOCK(mp); 1015 1016 if (mp->mnt_flag & MNT_ACLS) 1017 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 1018 "with \"acls\" mount option; option ignored\n", 1019 mp->mnt_stat.f_mntonname); 1020 mp->mnt_flag &= ~MNT_ACLS; 1021 mp->mnt_flag |= MNT_NFS4ACLS; 1022 1023 MNT_IUNLOCK(mp); 1024#else 1025 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 1026 "ACLs support\n", mp->mnt_stat.f_mntonname); 1027#endif 1028 } 1029 if ((fs->fs_flags & FS_TRIM) != 0) { 1030 len = sizeof(int); 1031 if (g_io_getattr("GEOM::candelete", cp, &len, 1032 &ump->um_candelete) == 0) { 1033 if (!ump->um_candelete) 1034 printf("WARNING: %s: TRIM flag on fs but disk " 1035 "does not support TRIM\n", 1036 mp->mnt_stat.f_mntonname); 1037 } else { 1038 printf("WARNING: %s: TRIM flag on fs but disk does " 1039 "not confirm that it supports TRIM\n", 1040 mp->mnt_stat.f_mntonname); 1041 ump->um_candelete = 0; 1042 } 1043 if (ump->um_candelete) { 1044 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 1045 taskqueue_thread_enqueue, &ump->um_trim_tq); 1046 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 1047 "%s trim", mp->mnt_stat.f_mntonname); 1048 } 1049 } 1050 1051 ump->um_mountp = mp; 1052 ump->um_dev = dev; 1053 ump->um_devvp = devvp; 1054 ump->um_nindir = fs->fs_nindir; 1055 ump->um_bptrtodb = fs->fs_fsbtodb; 1056 ump->um_seqinc = fs->fs_frag; 1057 for (i = 0; i < MAXQUOTAS; i++) 1058 ump->um_quotas[i] = NULLVP; 1059#ifdef UFS_EXTATTR 1060 ufs_extattr_uepm_init(&ump->um_extattr); 1061#endif 1062 /* 1063 * Set FS local "last mounted on" information (NULL pad) 1064 */ 1065 bzero(fs->fs_fsmnt, MAXMNTLEN); 1066 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1067 mp->mnt_stat.f_iosize = fs->fs_bsize; 1068 1069 if (mp->mnt_flag & MNT_ROOTFS) { 1070 /* 1071 * Root mount; update timestamp in mount structure. 1072 * this will be used by the common root mount code 1073 * to update the system clock. 1074 */ 1075 mp->mnt_time = fs->fs_time; 1076 } 1077 1078 if (ronly == 0) { 1079 fs->fs_mtime = time_second; 1080 if ((fs->fs_flags & FS_DOSOFTDEP) && 1081 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1082 free(fs->fs_csp, M_UFSMNT); 1083 ffs_flushfiles(mp, FORCECLOSE, td); 1084 goto out; 1085 } 1086 if (fs->fs_snapinum[0] != 0) 1087 ffs_snapshot_mount(mp); 1088 fs->fs_fmod = 1; 1089 fs->fs_clean = 0; 1090 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1091 } 1092 /* 1093 * Initialize filesystem state information in mount struct. 1094 */ 1095 MNT_ILOCK(mp); 1096 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1097 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1098 MNT_IUNLOCK(mp); 1099#ifdef UFS_EXTATTR 1100#ifdef UFS_EXTATTR_AUTOSTART 1101 /* 1102 * 1103 * Auto-starting does the following: 1104 * - check for /.attribute in the fs, and extattr_start if so 1105 * - for each file in .attribute, enable that file with 1106 * an attribute of the same name. 1107 * Not clear how to report errors -- probably eat them. 1108 * This would all happen while the filesystem was busy/not 1109 * available, so would effectively be "atomic". 
1110 */ 1111 (void) ufs_extattr_autostart(mp, td); 1112#endif /* !UFS_EXTATTR_AUTOSTART */ 1113#endif /* !UFS_EXTATTR */ 1114 return (0); 1115out: 1116 if (bp) 1117 brelse(bp); 1118 if (cp != NULL) { 1119 DROP_GIANT(); 1120 g_topology_lock(); 1121 g_vfs_close(cp); 1122 g_topology_unlock(); 1123 PICKUP_GIANT(); 1124 } 1125 if (ump) { 1126 mtx_destroy(UFS_MTX(ump)); 1127 if (mp->mnt_gjprovider != NULL) { 1128 free(mp->mnt_gjprovider, M_UFSMNT); 1129 mp->mnt_gjprovider = NULL; 1130 } 1131 free(ump->um_fs, M_UFSMNT); 1132 free(ump, M_UFSMNT); 1133 mp->mnt_data = NULL; 1134 } 1135 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 1136 dev_rel(dev); 1137 return (error); 1138} 1139 1140#include <sys/sysctl.h> 1141static int bigcgs = 0; 1142SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); 1143 1144/* 1145 * Sanity checks for loading old filesystem superblocks. 1146 * See ffs_oldfscompat_write below for unwound actions. 1147 * 1148 * XXX - Parts get retired eventually. 1149 * Unfortunately new bits get added. 1150 */ 1151static void 1152ffs_oldfscompat_read(fs, ump, sblockloc) 1153 struct fs *fs; 1154 struct ufsmount *ump; 1155 ufs2_daddr_t sblockloc; 1156{ 1157 off_t maxfilesize; 1158 1159 /* 1160 * If not yet done, update fs_flags location and value of fs_sblockloc. 1161 */ 1162 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1163 fs->fs_flags = fs->fs_old_flags; 1164 fs->fs_old_flags |= FS_FLAGS_UPDATED; 1165 fs->fs_sblockloc = sblockloc; 1166 } 1167 /* 1168 * If not yet done, update UFS1 superblock with new wider fields. 1169 */ 1170 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { 1171 fs->fs_maxbsize = fs->fs_bsize; 1172 fs->fs_time = fs->fs_old_time; 1173 fs->fs_size = fs->fs_old_size; 1174 fs->fs_dsize = fs->fs_old_dsize; 1175 fs->fs_csaddr = fs->fs_old_csaddr; 1176 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; 1177 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; 1178 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; 1179 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; 1180 } 1181 if (fs->fs_magic == FS_UFS1_MAGIC && 1182 fs->fs_old_inodefmt < FS_44INODEFMT) { 1183 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; 1184 fs->fs_qbmask = ~fs->fs_bmask; 1185 fs->fs_qfmask = ~fs->fs_fmask; 1186 } 1187 if (fs->fs_magic == FS_UFS1_MAGIC) { 1188 ump->um_savedmaxfilesize = fs->fs_maxfilesize; 1189 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; 1190 if (fs->fs_maxfilesize > maxfilesize) 1191 fs->fs_maxfilesize = maxfilesize; 1192 } 1193 /* Compatibility for old filesystems */ 1194 if (fs->fs_avgfilesize <= 0) 1195 fs->fs_avgfilesize = AVFILESIZ; 1196 if (fs->fs_avgfpdir <= 0) 1197 fs->fs_avgfpdir = AFPDIR; 1198 if (bigcgs) { 1199 fs->fs_save_cgsize = fs->fs_cgsize; 1200 fs->fs_cgsize = fs->fs_bsize; 1201 } 1202} 1203 1204/* 1205 * Unwinding superblock updates for old filesystems. 1206 * See ffs_oldfscompat_read above for details. 1207 * 1208 * XXX - Parts get retired eventually. 1209 * Unfortunately new bits get added. 1210 */ 1211void 1212ffs_oldfscompat_write(fs, ump) 1213 struct fs *fs; 1214 struct ufsmount *ump; 1215{ 1216 1217 /* 1218 * Copy back UFS2 updated fields that UFS1 inspects. 
1219 */ 1220 if (fs->fs_magic == FS_UFS1_MAGIC) { 1221 fs->fs_old_time = fs->fs_time; 1222 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; 1223 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; 1224 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; 1225 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; 1226 fs->fs_maxfilesize = ump->um_savedmaxfilesize; 1227 } 1228 if (bigcgs) { 1229 fs->fs_cgsize = fs->fs_save_cgsize; 1230 fs->fs_save_cgsize = 0; 1231 } 1232} 1233 1234/* 1235 * unmount system call 1236 */ 1237static int 1238ffs_unmount(mp, mntflags) 1239 struct mount *mp; 1240 int mntflags; 1241{ 1242 struct thread *td; 1243 struct ufsmount *ump = VFSTOUFS(mp); 1244 struct fs *fs; 1245 int error, flags, susp; 1246#ifdef UFS_EXTATTR 1247 int e_restart; 1248#endif 1249 1250 flags = 0; 1251 td = curthread; 1252 fs = ump->um_fs; 1253 susp = 0; 1254 if (mntflags & MNT_FORCE) { 1255 flags |= FORCECLOSE; 1256 susp = fs->fs_ronly == 0; 1257 } 1258#ifdef UFS_EXTATTR 1259 if ((error = ufs_extattr_stop(mp, td))) { 1260 if (error != EOPNOTSUPP) 1261 printf("WARNING: unmount %s: ufs_extattr_stop " 1262 "returned errno %d\n", mp->mnt_stat.f_mntonname, 1263 error); 1264 e_restart = 0; 1265 } else { 1266 ufs_extattr_uepm_destroy(&ump->um_extattr); 1267 e_restart = 1; 1268 } 1269#endif 1270 if (susp) { 1271 error = vfs_write_suspend_umnt(mp); 1272 if (error != 0) 1273 goto fail1; 1274 } 1275 if (MOUNTEDSOFTDEP(mp)) 1276 error = softdep_flushfiles(mp, flags, td); 1277 else 1278 error = ffs_flushfiles(mp, flags, td); 1279 if (error != 0 && error != ENXIO) 1280 goto fail; 1281 1282 UFS_LOCK(ump); 1283 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1284 printf("WARNING: unmount %s: pending error: blocks %jd " 1285 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1286 fs->fs_pendinginodes); 1287 fs->fs_pendingblocks = 0; 1288 fs->fs_pendinginodes = 0; 1289 } 1290 UFS_UNLOCK(ump); 1291 if (MOUNTEDSOFTDEP(mp)) 1292 softdep_unmount(mp); 1293 if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) { 1294 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; 1295 error = ffs_sbupdate(ump, MNT_WAIT, 0); 1296 if (error && error != ENXIO) { 1297 fs->fs_clean = 0; 1298 goto fail; 1299 } 1300 } 1301 if (susp) 1302 vfs_write_resume(mp, VR_START_WRITE); 1303 if (ump->um_trim_tq != NULL) { 1304 while (ump->um_trim_inflight != 0) 1305 pause("ufsutr", hz); 1306 taskqueue_drain_all(ump->um_trim_tq); 1307 taskqueue_free(ump->um_trim_tq); 1308 } 1309 DROP_GIANT(); 1310 g_topology_lock(); 1311 if (ump->um_fsckpid > 0) { 1312 /* 1313 * Return to normal read-only mode. 
1314 */ 1315 error = g_access(ump->um_cp, 0, -1, 0); 1316 ump->um_fsckpid = 0; 1317 } 1318 g_vfs_close(ump->um_cp); 1319 g_topology_unlock(); 1320 PICKUP_GIANT(); 1321 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1322 vrele(ump->um_devvp); 1323 dev_rel(ump->um_dev); 1324 mtx_destroy(UFS_MTX(ump)); 1325 if (mp->mnt_gjprovider != NULL) { 1326 free(mp->mnt_gjprovider, M_UFSMNT); 1327 mp->mnt_gjprovider = NULL; 1328 } 1329 free(fs->fs_csp, M_UFSMNT); 1330 free(fs, M_UFSMNT); 1331 free(ump, M_UFSMNT); 1332 mp->mnt_data = NULL; 1333 MNT_ILOCK(mp); 1334 mp->mnt_flag &= ~MNT_LOCAL; 1335 MNT_IUNLOCK(mp); 1336 return (error); 1337 1338fail: 1339 if (susp) 1340 vfs_write_resume(mp, VR_START_WRITE); 1341fail1: 1342#ifdef UFS_EXTATTR 1343 if (e_restart) { 1344 ufs_extattr_uepm_init(&ump->um_extattr); 1345#ifdef UFS_EXTATTR_AUTOSTART 1346 (void) ufs_extattr_autostart(mp, td); 1347#endif 1348 } 1349#endif 1350 1351 return (error); 1352} 1353 1354/* 1355 * Flush out all the files in a filesystem. 1356 */ 1357int 1358ffs_flushfiles(mp, flags, td) 1359 struct mount *mp; 1360 int flags; 1361 struct thread *td; 1362{ 1363 struct ufsmount *ump; 1364 int qerror, error; 1365 1366 ump = VFSTOUFS(mp); 1367 qerror = 0; 1368#ifdef QUOTA 1369 if (mp->mnt_flag & MNT_QUOTA) { 1370 int i; 1371 error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1372 if (error) 1373 return (error); 1374 for (i = 0; i < MAXQUOTAS; i++) { 1375 error = quotaoff(td, mp, i); 1376 if (error != 0) { 1377 if ((flags & EARLYFLUSH) == 0) 1378 return (error); 1379 else 1380 qerror = error; 1381 } 1382 } 1383 1384 /* 1385 * Here we fall through to vflush again to ensure that 1386 * we have gotten rid of all the system vnodes, unless 1387 * quotas must not be closed. 1388 */ 1389 } 1390#endif 1391 ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles"); 1392 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1393 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1394 return (error); 1395 ffs_snapshot_unmount(mp); 1396 flags |= FORCECLOSE; 1397 /* 1398 * Here we fall through to vflush again to ensure 1399 * that we have gotten rid of all the system vnodes. 1400 */ 1401 } 1402 1403 /* 1404 * Do not close system files if quotas were not closed, to be 1405 * able to sync the remaining dquots. The freeblks softupdate 1406 * workitems might hold a reference on a dquot, preventing 1407 * quotaoff() from completing. Next round of 1408 * softdep_flushworklist() iteration should process the 1409 * blockers, allowing the next run of quotaoff() to finally 1410 * flush held dquots. 1411 * 1412 * Otherwise, flush all the files. 1413 */ 1414 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1415 return (error); 1416 1417 /* 1418 * Flush filesystem metadata. 1419 */ 1420 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1421 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1422 VOP_UNLOCK(ump->um_devvp, 0); 1423 return (error); 1424} 1425 1426/* 1427 * Get filesystem statistics. 
1428 */ 1429static int 1430ffs_statfs(mp, sbp) 1431 struct mount *mp; 1432 struct statfs *sbp; 1433{ 1434 struct ufsmount *ump; 1435 struct fs *fs; 1436 1437 ump = VFSTOUFS(mp); 1438 fs = ump->um_fs; 1439 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1440 panic("ffs_statfs"); 1441 sbp->f_version = STATFS_VERSION; 1442 sbp->f_bsize = fs->fs_fsize; 1443 sbp->f_iosize = fs->fs_bsize; 1444 sbp->f_blocks = fs->fs_dsize; 1445 UFS_LOCK(ump); 1446 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 1447 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 1448 sbp->f_bavail = freespace(fs, fs->fs_minfree) + 1449 dbtofsb(fs, fs->fs_pendingblocks); 1450 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO; 1451 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 1452 UFS_UNLOCK(ump); 1453 sbp->f_namemax = NAME_MAX; 1454 return (0); 1455} 1456 1457static bool 1458sync_doupdate(struct inode *ip) 1459{ 1460 1461 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 1462 IN_UPDATE)) != 0); 1463} 1464 1465/* 1466 * For a lazy sync, we only care about access times, quotas and the 1467 * superblock. Other filesystem changes are already converted to 1468 * cylinder group blocks or inode blocks updates and are written to 1469 * disk by syncer. 1470 */ 1471static int 1472ffs_sync_lazy(mp) 1473 struct mount *mp; 1474{ 1475 struct vnode *mvp, *vp; 1476 struct inode *ip; 1477 struct thread *td; 1478 int allerror, error; 1479 1480 allerror = 0; 1481 td = curthread; 1482 if ((mp->mnt_flag & MNT_NOATIME) != 0) 1483 goto qupdate; 1484 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 1485 if (vp->v_type == VNON) { 1486 VI_UNLOCK(vp); 1487 continue; 1488 } 1489 ip = VTOI(vp); 1490 1491 /* 1492 * The IN_ACCESS flag is converted to IN_MODIFIED by 1493 * ufs_close() and ufs_getattr() by the calls to 1494 * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1495 * Test also all the other timestamp flags too, to pick up 1496 * any other cases that could be missed. 1497 */ 1498 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1499 VI_UNLOCK(vp); 1500 continue; 1501 } 1502 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, 1503 td)) != 0) 1504 continue; 1505 if (sync_doupdate(ip)) 1506 error = ffs_update(vp, 0); 1507 if (error != 0) 1508 allerror = error; 1509 vput(vp); 1510 } 1511 1512qupdate: 1513#ifdef QUOTA 1514 qsync(mp); 1515#endif 1516 1517 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1518 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1519 allerror = error; 1520 return (allerror); 1521} 1522 1523/* 1524 * Go through the disk queues to initiate sandbagged IO; 1525 * go through the inodes to write those that have been modified; 1526 * initiate the writing of the super block if it has been modified. 1527 * 1528 * Note: we are always called with the filesystem marked busy using 1529 * vfs_busy(). 
1530 */ 1531static int 1532ffs_sync(mp, waitfor) 1533 struct mount *mp; 1534 int waitfor; 1535{ 1536 struct vnode *mvp, *vp, *devvp; 1537 struct thread *td; 1538 struct inode *ip; 1539 struct ufsmount *ump = VFSTOUFS(mp); 1540 struct fs *fs; 1541 int error, count, lockreq, allerror = 0; 1542 int suspend; 1543 int suspended; 1544 int secondary_writes; 1545 int secondary_accwrites; 1546 int softdep_deps; 1547 int softdep_accdeps; 1548 struct bufobj *bo; 1549 1550 suspend = 0; 1551 suspended = 0; 1552 td = curthread; 1553 fs = ump->um_fs; 1554 if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0) 1555 panic("%s: ffs_sync: modification on read-only filesystem", 1556 fs->fs_fsmnt); 1557 if (waitfor == MNT_LAZY) { 1558 if (!rebooting) 1559 return (ffs_sync_lazy(mp)); 1560 waitfor = MNT_NOWAIT; 1561 } 1562 1563 /* 1564 * Write back each (modified) inode. 1565 */ 1566 lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1567 if (waitfor == MNT_SUSPEND) { 1568 suspend = 1; 1569 waitfor = MNT_WAIT; 1570 } 1571 if (waitfor == MNT_WAIT) 1572 lockreq = LK_EXCLUSIVE; 1573 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; 1574loop: 1575 /* Grab snapshot of secondary write counts */ 1576 MNT_ILOCK(mp); 1577 secondary_writes = mp->mnt_secondary_writes; 1578 secondary_accwrites = mp->mnt_secondary_accwrites; 1579 MNT_IUNLOCK(mp); 1580 1581 /* Grab snapshot of softdep dependency counts */ 1582 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1583 1584 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1585 /* 1586 * Depend on the vnode interlock to keep things stable enough 1587 * for a quick test. Since there might be hundreds of 1588 * thousands of vnodes, we cannot afford even a subroutine 1589 * call unless there's a good chance that we have work to do. 1590 */ 1591 if (vp->v_type == VNON) { 1592 VI_UNLOCK(vp); 1593 continue; 1594 } 1595 ip = VTOI(vp); 1596 if ((ip->i_flag & 1597 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 1598 vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1599 VI_UNLOCK(vp); 1600 continue; 1601 } 1602 if ((error = vget(vp, lockreq, td)) != 0) { 1603 if (error == ENOENT || error == ENOLCK) { 1604 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1605 goto loop; 1606 } 1607 continue; 1608 } 1609 if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0) 1610 allerror = error; 1611 vput(vp); 1612 } 1613 /* 1614 * Force stale filesystem control information to be flushed. 
1615 */ 1616 if (waitfor == MNT_WAIT || rebooting) { 1617 if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 1618 allerror = error; 1619 /* Flushed work items may create new vnodes to clean */ 1620 if (allerror == 0 && count) 1621 goto loop; 1622 } 1623#ifdef QUOTA 1624 qsync(mp); 1625#endif 1626 1627 devvp = ump->um_devvp; 1628 bo = &devvp->v_bufobj; 1629 BO_LOCK(bo); 1630 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1631 BO_UNLOCK(bo); 1632 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1633 error = VOP_FSYNC(devvp, waitfor, td); 1634 VOP_UNLOCK(devvp, 0); 1635 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 1636 error = ffs_sbupdate(ump, waitfor, 0); 1637 if (error != 0) 1638 allerror = error; 1639 if (allerror == 0 && waitfor == MNT_WAIT) 1640 goto loop; 1641 } else if (suspend != 0) { 1642 if (softdep_check_suspend(mp, 1643 devvp, 1644 softdep_deps, 1645 softdep_accdeps, 1646 secondary_writes, 1647 secondary_accwrites) != 0) { 1648 MNT_IUNLOCK(mp); 1649 goto loop; /* More work needed */ 1650 } 1651 mtx_assert(MNT_MTX(mp), MA_OWNED); 1652 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1653 MNT_IUNLOCK(mp); 1654 suspended = 1; 1655 } else 1656 BO_UNLOCK(bo); 1657 /* 1658 * Write back modified superblock. 1659 */ 1660 if (fs->fs_fmod != 0 && 1661 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1662 allerror = error; 1663 return (allerror); 1664} 1665 1666int 1667ffs_vget(mp, ino, flags, vpp) 1668 struct mount *mp; 1669 ino_t ino; 1670 int flags; 1671 struct vnode **vpp; 1672{ 1673 return (ffs_vgetf(mp, ino, flags, vpp, 0)); 1674} 1675 1676int 1677ffs_vgetf(mp, ino, flags, vpp, ffs_flags) 1678 struct mount *mp; 1679 ino_t ino; 1680 int flags; 1681 struct vnode **vpp; 1682 int ffs_flags; 1683{ 1684 struct fs *fs; 1685 struct inode *ip; 1686 struct ufsmount *ump; 1687 struct buf *bp; 1688 struct vnode *vp; 1689 struct cdev *dev; 1690 int error; 1691 1692 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 1693 if (error || *vpp != NULL) 1694 return (error); 1695 1696 /* 1697 * We must promote to an exclusive lock for vnode creation. This 1698 * can happen if lookup is passed LOCKSHARED. 1699 */ 1700 if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1701 flags &= ~LK_TYPE_MASK; 1702 flags |= LK_EXCLUSIVE; 1703 } 1704 1705 /* 1706 * We do not lock vnode creation as it is believed to be too 1707 * expensive for such rare case as simultaneous creation of vnode 1708 * for same ino by different processes. We just allow them to race 1709 * and check later to decide who wins. Let the race begin! 1710 */ 1711 1712 ump = VFSTOUFS(mp); 1713 dev = ump->um_dev; 1714 fs = ump->um_fs; 1715 ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO); 1716 1717 /* Allocate a new vnode/inode. */ 1718 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 1719 &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1720 if (error) { 1721 *vpp = NULL; 1722 uma_zfree(uma_inode, ip); 1723 return (error); 1724 } 1725 /* 1726 * FFS supports recursive locking. 
1727 */ 1728 lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); 1729 VN_LOCK_AREC(vp); 1730 vp->v_data = ip; 1731 vp->v_bufobj.bo_bsize = fs->fs_bsize; 1732 ip->i_vnode = vp; 1733 ip->i_ump = ump; 1734 ip->i_fs = fs; 1735 ip->i_dev = dev; 1736 ip->i_number = ino; 1737 ip->i_ea_refs = 0; 1738 ip->i_nextclustercg = -1; 1739#ifdef QUOTA 1740 { 1741 int i; 1742 for (i = 0; i < MAXQUOTAS; i++) 1743 ip->i_dquot[i] = NODQUOT; 1744 } 1745#endif 1746 1747 if (ffs_flags & FFSV_FORCEINSMQ) 1748 vp->v_vflag |= VV_FORCEINSMQ; 1749 error = insmntque(vp, mp); 1750 if (error != 0) { 1751 uma_zfree(uma_inode, ip); 1752 *vpp = NULL; 1753 return (error); 1754 } 1755 vp->v_vflag &= ~VV_FORCEINSMQ; 1756 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 1757 if (error || *vpp != NULL) 1758 return (error); 1759 1760 /* Read in the disk contents for the inode, copy into the inode. */ 1761 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), 1762 (int)fs->fs_bsize, NOCRED, &bp); 1763 if (error) { 1764 /* 1765 * The inode does not contain anything useful, so it would 1766 * be misleading to leave it on its hash chain. With mode 1767 * still zero, it will be unlinked and returned to the free 1768 * list by vput(). 1769 */ 1770 brelse(bp); 1771 vput(vp); 1772 *vpp = NULL; 1773 return (error); 1774 } 1775 if (ip->i_ump->um_fstype == UFS1) 1776 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 1777 else 1778 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1779 ffs_load_inode(bp, ip, fs, ino); 1780 if (DOINGSOFTDEP(vp)) 1781 softdep_load_inodeblock(ip); 1782 else 1783 ip->i_effnlink = ip->i_nlink; 1784 bqrelse(bp); 1785 1786 /* 1787 * Initialize the vnode from the inode, check for aliases. 1788 * Note that the underlying vnode may have changed. 1789 */ 1790 if (ip->i_ump->um_fstype == UFS1) 1791 error = ufs_vinit(mp, &ffs_fifoops1, &vp); 1792 else 1793 error = ufs_vinit(mp, &ffs_fifoops2, &vp); 1794 if (error) { 1795 vput(vp); 1796 *vpp = NULL; 1797 return (error); 1798 } 1799 1800 /* 1801 * Finish inode initialization. 1802 */ 1803 if (vp->v_type != VFIFO) { 1804 /* FFS supports shared locking for all files except fifos. */ 1805 VN_LOCK_ASHARE(vp); 1806 } 1807 1808 /* 1809 * Set up a generation number for this inode if it does not 1810 * already have one. This should only happen on old filesystems. 1811 */ 1812 if (ip->i_gen == 0) { 1813 ip->i_gen = arc4random() / 2 + 1; 1814 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 1815 ip->i_flag |= IN_MODIFIED; 1816 DIP_SET(ip, i_gen, ip->i_gen); 1817 } 1818 } 1819#ifdef MAC 1820 if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) { 1821 /* 1822 * If this vnode is already allocated, and we're running 1823 * multi-label, attempt to perform a label association 1824 * from the extended attributes on the inode. 1825 */ 1826 error = mac_vnode_associate_extattr(mp, vp); 1827 if (error) { 1828 /* ufs_inactive will release ip->i_devvp ref. */ 1829 vput(vp); 1830 *vpp = NULL; 1831 return (error); 1832 } 1833 } 1834#endif 1835 1836 *vpp = vp; 1837 return (0); 1838} 1839 1840/* 1841 * File handle to vnode 1842 * 1843 * Have to be really careful about stale file handles: 1844 * - check that the inode number is valid 1845 * - call ffs_vget() to get the locked inode 1846 * - check for an unallocated inode (i_mode == 0) 1847 * - check that the given client host has export rights and return 1848 * those rights via. 
exflagsp and credanonp 1849 */ 1850static int 1851ffs_fhtovp(mp, fhp, flags, vpp) 1852 struct mount *mp; 1853 struct fid *fhp; 1854 int flags; 1855 struct vnode **vpp; 1856{ 1857 struct ufid *ufhp; 1858 struct fs *fs; 1859 1860 ufhp = (struct ufid *)fhp; 1861 fs = VFSTOUFS(mp)->um_fs; 1862 if (ufhp->ufid_ino < ROOTINO || 1863 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) 1864 return (ESTALE); 1865 return (ufs_fhtovp(mp, ufhp, flags, vpp)); 1866} 1867 1868/* 1869 * Initialize the filesystem. 1870 */ 1871static int 1872ffs_init(vfsp) 1873 struct vfsconf *vfsp; 1874{ 1875 1876 ffs_susp_initialize(); 1877 softdep_initialize(); 1878 return (ufs_init(vfsp)); 1879} 1880 1881/* 1882 * Undo the work of ffs_init(). 1883 */ 1884static int 1885ffs_uninit(vfsp) 1886 struct vfsconf *vfsp; 1887{ 1888 int ret; 1889 1890 ret = ufs_uninit(vfsp); 1891 softdep_uninitialize(); 1892 ffs_susp_uninitialize(); 1893 return (ret); 1894} 1895 1896/* 1897 * Write a superblock and associated information back to disk. 1898 */ 1899int 1900ffs_sbupdate(ump, waitfor, suspended) 1901 struct ufsmount *ump; 1902 int waitfor; 1903 int suspended; 1904{ 1905 struct fs *fs = ump->um_fs; 1906 struct buf *sbbp; 1907 struct buf *bp; 1908 int blks; 1909 void *space; 1910 int i, size, error, allerror = 0; 1911 1912 if (fs->fs_ronly == 1 && 1913 (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) != 1914 (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0) 1915 panic("ffs_sbupdate: write read-only filesystem"); 1916 /* 1917 * We use the superblock's buf to serialize calls to ffs_sbupdate(). 1918 */ 1919 sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 1920 (int)fs->fs_sbsize, 0, 0, 0); 1921 /* 1922 * First write back the summary information. 1923 */ 1924 blks = howmany(fs->fs_cssize, fs->fs_fsize); 1925 space = fs->fs_csp; 1926 for (i = 0; i < blks; i += fs->fs_frag) { 1927 size = fs->fs_bsize; 1928 if (i + fs->fs_frag > blks) 1929 size = (blks - i) * fs->fs_fsize; 1930 bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), 1931 size, 0, 0, 0); 1932 bcopy(space, bp->b_data, (u_int)size); 1933 space = (char *)space + size; 1934 if (suspended) 1935 bp->b_flags |= B_VALIDSUSPWRT; 1936 if (waitfor != MNT_WAIT) 1937 bawrite(bp); 1938 else if ((error = bwrite(bp)) != 0) 1939 allerror = error; 1940 } 1941 /* 1942 * Now write back the superblock itself. If any errors occurred 1943 * up to this point, then fail so that the superblock avoids 1944 * being written out as clean. 
1945 */ 1946 if (allerror) { 1947 brelse(sbbp); 1948 return (allerror); 1949 } 1950 bp = sbbp; 1951 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 && 1952 (fs->fs_flags & FS_FLAGS_UPDATED) == 0) { 1953 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 1954 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1); 1955 fs->fs_sblockloc = SBLOCK_UFS1; 1956 } 1957 if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 && 1958 (fs->fs_flags & FS_FLAGS_UPDATED) == 0) { 1959 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 1960 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2); 1961 fs->fs_sblockloc = SBLOCK_UFS2; 1962 } 1963 fs->fs_fmod = 0; 1964 fs->fs_time = time_second; 1965 if (MOUNTEDSOFTDEP(ump->um_mountp)) 1966 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp); 1967 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 1968 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 1969 if (suspended) 1970 bp->b_flags |= B_VALIDSUSPWRT; 1971 if (waitfor != MNT_WAIT) 1972 bawrite(bp); 1973 else if ((error = bwrite(bp)) != 0) 1974 allerror = error; 1975 return (allerror); 1976} 1977 1978static int 1979ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, 1980 int attrnamespace, const char *attrname) 1981{ 1982 1983#ifdef UFS_EXTATTR 1984 return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace, 1985 attrname)); 1986#else 1987 return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, 1988 attrname)); 1989#endif 1990} 1991 1992static void 1993ffs_ifree(struct ufsmount *ump, struct inode *ip) 1994{ 1995 1996 if (ump->um_fstype == UFS1 && ip->i_din1 != NULL) 1997 uma_zfree(uma_ufs1, ip->i_din1); 1998 else if (ip->i_din2 != NULL) 1999 uma_zfree(uma_ufs2, ip->i_din2); 2000 uma_zfree(uma_inode, ip); 2001} 2002 2003static int dobkgrdwrite = 1; 2004SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0, 2005 "Do background writes (honoring the BV_BKGRDWRITE flag)?"); 2006 2007/* 2008 * Complete a background write started from bwrite. 2009 */ 2010static void 2011ffs_backgroundwritedone(struct buf *bp) 2012{ 2013 struct bufobj *bufobj; 2014 struct buf *origbp; 2015 2016 /* 2017 * Find the original buffer that we are writing. 2018 */ 2019 bufobj = bp->b_bufobj; 2020 BO_LOCK(bufobj); 2021 if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL) 2022 panic("backgroundwritedone: lost buffer"); 2023 2024 /* 2025 * We should mark the cylinder group buffer origbp as 2026 * dirty, to not loose the failed write. 2027 */ 2028 if ((bp->b_ioflags & BIO_ERROR) != 0) 2029 origbp->b_vflags |= BV_BKGRDERR; 2030 BO_UNLOCK(bufobj); 2031 /* 2032 * Process dependencies then return any unfinished ones. 2033 */ 2034 pbrelvp(bp); 2035 if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0) 2036 buf_complete(bp); 2037#ifdef SOFTUPDATES 2038 if (!LIST_EMPTY(&bp->b_dep)) 2039 softdep_move_dependencies(bp, origbp); 2040#endif 2041 /* 2042 * This buffer is marked B_NOCACHE so when it is released 2043 * by biodone it will be tossed. 2044 */ 2045 bp->b_flags |= B_NOCACHE; 2046 bp->b_flags &= ~B_CACHE; 2047 2048 /* 2049 * Prevent brelse() from trying to keep and re-dirtying bp on 2050 * errors. It causes b_bufobj dereference in 2051 * bdirty()/reassignbuf(), and b_bufobj was cleared in 2052 * pbrelvp() above. 
         */
        if ((bp->b_ioflags & BIO_ERROR) != 0)
                bp->b_flags |= B_INVAL;
        bufdone(bp);
        BO_LOCK(bufobj);
        /*
         * Clear the BV_BKGRDINPROG flag in the original buffer
         * and awaken it if it is waiting for the write to complete.
         * If BV_BKGRDINPROG is not set in the original buffer it must
         * have been released and re-instantiated - which is not legal.
         */
        KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
            ("backgroundwritedone: lost buffer2"));
        origbp->b_vflags &= ~BV_BKGRDINPROG;
        if (origbp->b_vflags & BV_BKGRDWAIT) {
                origbp->b_vflags &= ~BV_BKGRDWAIT;
                wakeup(&origbp->b_xflags);
        }
        BO_UNLOCK(bufobj);
}


/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
        struct buf *newbp;

        CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
        if (bp->b_flags & B_INVAL) {
                brelse(bp);
                return (0);
        }

        if (!BUF_ISLOCKED(bp))
                panic("bufwrite: buffer is not busy???");
        /*
         * If a background write is already in progress, delay
         * writing this block if it is asynchronous. Otherwise
         * wait for the background write to complete.
         */
        BO_LOCK(bp->b_bufobj);
        if (bp->b_vflags & BV_BKGRDINPROG) {
                if (bp->b_flags & B_ASYNC) {
                        BO_UNLOCK(bp->b_bufobj);
                        bdwrite(bp);
                        return (0);
                }
                bp->b_vflags |= BV_BKGRDWAIT;
                msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
                    "bwrbg", 0);
                if (bp->b_vflags & BV_BKGRDINPROG)
                        panic("bufwrite: still writing");
        }
        bp->b_vflags &= ~BV_BKGRDERR;
        BO_UNLOCK(bp->b_bufobj);

        /*
         * If this buffer is marked for background writing and we
         * do not have to wait for it, make a copy and write the
         * copy so as to leave this buffer ready for further use.
         *
         * This optimization eats a lot of memory.  If we have a page
         * or buffer shortfall we can't do it.
         */
        if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
            (bp->b_flags & B_ASYNC) &&
            !vm_page_count_severe() &&
            !buf_dirty_count_severe()) {
                KASSERT(bp->b_iodone == NULL,
                    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

                /* get a new block */
                newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
                if (newbp == NULL)
                        goto normal_write;

                KASSERT((bp->b_flags & B_UNMAPPED) == 0, ("Unmapped cg"));
                memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
                BO_LOCK(bp->b_bufobj);
                bp->b_vflags |= BV_BKGRDINPROG;
                BO_UNLOCK(bp->b_bufobj);
                newbp->b_xflags |= BX_BKGRDMARKER;
                newbp->b_lblkno = bp->b_lblkno;
                newbp->b_blkno = bp->b_blkno;
                newbp->b_offset = bp->b_offset;
                newbp->b_iodone = ffs_backgroundwritedone;
                newbp->b_flags |= B_ASYNC;
                newbp->b_flags &= ~B_INVAL;
                pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
                /*
                 * Move over the dependencies.  If there are rollbacks,
                 * leave the parent buffer dirtied as it will need to
                 * be written again.
                 */
                if (LIST_EMPTY(&bp->b_dep) ||
                    softdep_move_dependencies(bp, newbp) == 0)
                        bundirty(bp);
#else
                bundirty(bp);
#endif

                /*
                 * Initiate write on the copy, release the original.  The
                 * BKGRDINPROG flag prevents it from going away until
                 * the background write completes.
                 */
                bqrelse(bp);
                bp = newbp;
        } else
                /* Mark the buffer clean */
                bundirty(bp);


        /* Let the normal bufwrite do the rest for us */
normal_write:
        return (bufwrite(bp));
}


/*
 * I/O strategy routine for the GEOM-backed device vnode.  For writes,
 * check for illegal writes to a suspended filesystem, perform snapshot
 * copy-on-write, and start soft updates processing before handing the
 * buffer to GEOM.
 */
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
        struct vnode *vp;
        int error;
        struct buf *tbp;
        int nocopy;

        vp = bo->__bo_vnode;
        if (bp->b_iocmd == BIO_WRITE) {
                if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
                    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
                    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
                        panic("ffs_geom_strategy: bad I/O");
                nocopy = bp->b_flags & B_NOCOPY;
                bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
                if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
                    vp->v_rdev->si_snapdata != NULL) {
                        if ((bp->b_flags & B_CLUSTER) != 0) {
                                runningbufwakeup(bp);
                                TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
                                    b_cluster.cluster_entry) {
                                        error = ffs_copyonwrite(vp, tbp);
                                        if (error != 0 &&
                                            error != EOPNOTSUPP) {
                                                bp->b_error = error;
                                                bp->b_ioflags |= BIO_ERROR;
                                                bufdone(bp);
                                                return;
                                        }
                                }
                                bp->b_runningbufspace = bp->b_bufsize;
                                atomic_add_long(&runningbufspace,
                                    bp->b_runningbufspace);
                        } else {
                                error = ffs_copyonwrite(vp, bp);
                                if (error != 0 && error != EOPNOTSUPP) {
                                        bp->b_error = error;
                                        bp->b_ioflags |= BIO_ERROR;
                                        bufdone(bp);
                                        return;
                                }
                        }
                }
#ifdef SOFTUPDATES
                if ((bp->b_flags & B_CLUSTER) != 0) {
                        TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
                            b_cluster.cluster_entry) {
                                if (!LIST_EMPTY(&tbp->b_dep))
                                        buf_start(tbp);
                        }
                } else {
                        if (!LIST_EMPTY(&bp->b_dep))
                                buf_start(bp);
                }

#endif
        }
        g_vfs_strategy(bo, bp);
}

/*
 * Return non-zero if the given mount point is served by this
 * filesystem (i.e., uses ufs_vfsops).
 */
int
ffs_own_mount(const struct mount *mp)
{

        if (mp->mnt_op == &ufs_vfsops)
                return (1);
        return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
        struct mount *mp;
        struct ufsmount *ump;

        if (have_addr) {
                ump = VFSTOUFS((struct mount *)addr);
                db_print_ffs(ump);
                return;
        }

        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
                        db_print_ffs(VFSTOUFS(mp));
        }
}

#endif /* SOFTUPDATES */
#endif /* DDB */