/*-
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_vfsops.c 284021 2015-06-05 08:36:25Z kib $");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};
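
/*
 * FFS hangs its own buffer operations off the device vnode (see
 * ffs_mountfs() below, which points bo_ops at ffs_ops): ffs_bufwrite()
 * implements background writes, ffs_geom_strategy() passes I/O down to
 * GEOM, and, unless NO_FFS_SNAPSHOT is configured, ffs_bdflush() makes
 * the delayed-write flush snapshot-aware.
 */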

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", NULL };
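
/*
 * For example, a subset of these options can be supplied for the root
 * filesystem from loader(8) (device name below is illustrative):
 *
 *	vfs.root.mountfrom="ufs:/dev/ada0p2"
 *	vfs.root.mountfrom.options="rw,noatime"
 */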

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	pid_t fsckpid = 0;
	int error, flags;
	uint64_t mntorflags;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
	    vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
		/*
		 * Once we have set the restricted PID, do not
		 * persist "fsckpid" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "fsckpid");
		vfs_deleteopt(mp->mnt_opt, "fsckpid");
		if (mp->mnt_flag & MNT_UPDATE) {
			if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
			     vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
				vfs_mount_error(mp,
				    "Checker enable: Must be read-only");
				return (EINVAL);
			}
		} else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
			vfs_mount_error(mp,
			    "Checker enable: Must be read-only");
			return (EINVAL);
		}
		/* Set to -1 if we are done */
		if (fsckpid == 0)
			fsckpid = -1;
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		if (fsckpid == -1 && ump->um_fsckpid > 0) {
			if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
			    (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
				return (error);
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Return to normal read-only mode.
			 */
			error = g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			ump->um_fsckpid = 0;
		}
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (MOUNTEDSOFTDEP(mp)) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (MOUNTEDSOFTDEP(mp))
				softdep_unmount(mp);
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			PICKUP_GIANT();
			fs->fs_ronly = 1;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, td, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If we are running a checker, do not allow upgrade.
			 */
			if (ump->um_fsckpid > 0) {
				vfs_mount_error(mp,
				    "Active checker, cannot upgrade to write");
				return (EINVAL);
			}
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					   "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					   "R/W mount of %s denied. %s.%s",
					   fs->fs_fsmnt,
					   "Filesystem is not clean - run fsck",
					   (fs->fs_flags & FS_SUJ) == 0 ? "" :
					   " Forced mount will invalidate"
					   " journal contents");
					return (EPERM);
				}
			}
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				vn_finished_write(mp);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				vn_finished_write(mp);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vn_finished_write(mp);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
		/*
		 * If this is a request from fsck to clean up the filesystem,
		 * then allow the specified pid to proceed.
		 */
		if (fsckpid > 0) {
			if (ump->um_fsckpid != 0) {
				vfs_mount_error(mp,
				    "Active checker already running on %s",
				    fs->fs_fsmnt);
				return (EINVAL);
			}
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error) {
				vfs_mount_error(mp,
				    "Checker activation failed on %s",
				    fs->fs_fsmnt);
				return (error);
			}
			ump->um_fsckpid = fsckpid;
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			fs->fs_mtime = time_second;
			fs->fs_fmod = 1;
			fs->fs_clean = 0;
			(void) ffs_sbupdate(ump, MNT_WAIT, 0);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(&ndp)) != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device,
		 * then it's not correct.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
		if (fsckpid > 0) {
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error) {
				printf("WARNING: %s: Checker activation "
				    "failed\n", fs->fs_fsmnt);
			} else {
				ump->um_fsckpid = fsckpid;
				if (fs->fs_snapinum[0] != 0)
					ffs_snapshot_mount(mp);
				fs->fs_mtime = time_second;
				fs->fs_clean = 0;
				(void) ffs_sbupdate(ump, MNT_WAIT, 0);
			}
		}
	}
	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	struct export_args exp;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);
	vfs_oexport_conv(&args.export, &exp);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &exp, sizeof(exp));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, struct thread *td, int force)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, size, error;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && force == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp, 0);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
			brelse(bp);
			return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (e.g., fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_active = fs->fs_active;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			vrele(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ffs_load_inode(bp, ip, fs, ip->i_number);
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		VOP_UNLOCK(vp, 0);
		vrele(vp);
	}
	return (0);
}

/*
 * Possible superblock locations ordered from most to least likely.
 */
static int sblock_try[] = SBLOCKSEARCH;
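
/*
 * As of this revision, SBLOCKSEARCH (defined in <ufs/ffs/fs.h>) expands
 * to { SBLOCK_UFS2, SBLOCK_UFS1, SBLOCK_FLOPPY, SBLOCK_PIGGY, -1 },
 * i.e. byte offsets 65536, 8192, 0 and 262144, terminated by -1.
 */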

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(devvp, mp, td)
	struct vnode *devvp;
	struct mount *mp;
	struct thread *td;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	struct cdev *dev;
	void *space;
	ufs2_daddr_t sblockloc;
	int error, i, blks, size, ronly;
	int32_t *lp;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;

	bp = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		goto out;
	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	devvp->v_bufobj.bo_ops = &ffs_ops;

	fs = NULL;
	sblockloc = 0;
	/*
	 * Try reading the superblock in each of its possible locations.
	 */
	for (i = 0; sblock_try[i] != -1; i++) {
		if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
			error = EINVAL;
			vfs_mount_error(mp,
			    "Invalid sectorsize %d for superblock size %d",
			    cp->provider->sectorsize, SBLOCKSIZE);
			goto out;
		}
		if ((error = bread(devvp, btodb(sblock_try[i]), SBLOCKSIZE,
		    cred, &bp)) != 0)
			goto out;
		fs = (struct fs *)bp->b_data;
		sblockloc = sblock_try[i];
		if ((fs->fs_magic == FS_UFS1_MAGIC ||
		     (fs->fs_magic == FS_UFS2_MAGIC &&
		      (fs->fs_sblockloc == sblockloc ||
		       (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) &&
		    fs->fs_bsize <= MAXBSIZE &&
		    fs->fs_bsize >= sizeof(struct fs))
			break;
		brelse(bp);
		bp = NULL;
	}
	if (sblock_try[i] == -1) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	fs->fs_fmod = 0;
	fs->fs_flags &= ~FS_INDEXDIRS;	/* no support for directory indices */
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		size = 1024;
		mp->mnt_gjprovider = malloc(size, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &size,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, size,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			printf("WARNING: %s: GJOURNAL flag on fs "
			    "but no gjournal provider below\n",
			    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBLOCKSIZE)
		bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	ffs_oldfscompat_read(fs, ump, sblockloc);
	fs->fs_ronly = ronly;
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		size = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &size,
		    &ump->um_candelete) == 0) {
			if (!ump->um_candelete)
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
			ump->um_candelete = 0;
		}
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * This will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
			devvp->v_rdev->si_mountpt = mp;
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 * 	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (bp)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	if (ump) {
		mtx_destroy(UFS_MTX(ump));
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	dev_rel(dev);
	return (error);
}

#include <sys/sysctl.h>
static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
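
/*
 * debug.bigcgs is a debugging knob: when set, ffs_oldfscompat_read()
 * below overrides fs_cgsize with fs_bsize (saving the old value in
 * fs_save_cgsize), and ffs_oldfscompat_write() restores it before the
 * superblock is written back to disk.
 */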

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(fs, ump, sblockloc)
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
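	/*
	 * Clamp fs_maxfilesize to what UFS1 block pointers can address:
	 * 2^31 blocks of fs_bsize bytes.  For example, with 8K blocks the
	 * limit works out to 0x80000000 * 8192 - 1 = 2^44 - 1 bytes.
	 */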
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(fs, ump)
	struct fs *fs;
	struct ufsmount *ump;
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(mp, mntflags)
	struct mount *mp;
	int mntflags;
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	susp = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		susp = fs->fs_ronly == 0;
	}
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && error != ENXIO)
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (error && error != ENXIO) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	DROP_GIANT();
	g_topology_lock();
	if (ump->um_fsckpid > 0) {
		/*
		 * Return to normal read-only mode.
		 */
		error = g_access(ump->um_cp, 0, -1, 0);
		ump->um_fsckpid = 0;
	}
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	if (ump->um_devvp->v_type == VCHR && ump->um_devvp->v_rdev != NULL)
		ump->um_devvp->v_rdev->si_mountpt = NULL;
	vrele(ump->um_devvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
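	/*
	 * Report the fragment size as f_bsize, since a fragment is the
	 * smallest unit the filesystem allocates; f_iosize is the full
	 * block size, the preferred I/O transfer size.
	 */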
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = NAME_MAX;
	return (0);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(mp)
	struct mount *mp;
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	struct thread *td;
	int allerror, error;

	allerror = 0;
	td = curthread;
	if ((mp->mnt_flag & MNT_NOATIME) != 0)
		goto qupdate;
	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Also test all the other timestamp flags, to pick up
		 * any other cases that could be missed.
		 */
		if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
		    IN_UPDATE)) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
		    td)) != 0)
			continue;
		error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}

qupdate:
#ifdef QUOTA
	qsync(mp);
#endif

	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT || rebooting) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}
#ifdef QUOTA
	qsync(mp);
#endif

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, waitfor, td);
		VOP_UNLOCK(devvp, 0);
		if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
			error = ffs_sbupdate(ump, waitfor, 0);
		if (error != 0)
			allerror = error;
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
					  devvp,
					  softdep_deps,
					  softdep_accdeps,
					  secondary_writes,
					  secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	return (allerror);
}

int
ffs_vget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (ffs_vgetf(mp, ino, flags, vpp, 0));
}

int
ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	struct cdev *dev;
	int error;

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such a rare case as simultaneous creation of a
	 * vnode for the same ino by different processes. We just allow
	 * them to race and check later to decide who wins. Let the race
	 * begin!
	 */

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_fs;
	ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	if (fs->fs_magic == FS_UFS1_MAGIC)
		error = getnewvnode("ufs", mp, &ffs_vnodeops1, &vp);
	else
		error = getnewvnode("ufs", mp, &ffs_vnodeops2, &vp);
	if (error) {
		*vpp = NULL;
		uma_zfree(uma_inode, ip);
		return (error);
	}
	/*
	 * FFS supports recursive locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	VN_LOCK_AREC(vp);
	vp->v_data = ip;
	vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_fs = fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
	ip->i_nextclustercg = -1;
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	if (ffs_flags & FFSV_FORCEINSMQ)
		vp->v_vflag |= VV_FORCEINSMQ;
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree(uma_inode, ip);
		*vpp = NULL;
		return (error);
	}
	vp->v_vflag &= ~VV_FORCEINSMQ;
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (ip->i_ump->um_fstype == UFS1)
		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
	else
		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
	ffs_load_inode(bp, ip, fs, ino);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if (ip->i_ump->um_fstype == UFS1)
		error = ufs_vinit(mp, &ffs_fifoops1, &vp);
	else
		error = ufs_vinit(mp, &ffs_fifoops2, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
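		/*
		 * arc4random() / 2 + 1 keeps the generation number
		 * nonzero, and positive even if it is ever stored in a
		 * signed 32-bit field.
		 */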
1762		ip->i_gen = arc4random() / 2 + 1;
1763		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
1764			ip->i_flag |= IN_MODIFIED;
1765			DIP_SET(ip, i_gen, ip->i_gen);
1766		}
1767	}
1768#ifdef MAC
1769	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
1770		/*
1771		 * If this vnode is already allocated, and we're running
1772		 * multi-label, attempt to perform a label association
1773		 * from the extended attributes on the inode.
1774		 */
1775		error = mac_vnode_associate_extattr(mp, vp);
1776		if (error) {
1777			/* ufs_inactive will release ip->i_devvp ref. */
1778			vput(vp);
1779			*vpp = NULL;
1780			return (error);
1781		}
1782	}
1783#endif
1784
1785	*vpp = vp;
1786	return (0);
1787}
1788
1789/*
1790 * File handle to vnode
1791 *
1792 * Have to be really careful about stale file handles:
1793 * - check that the inode number is valid
1794 * - call ffs_vget() to get the locked inode
1795 * - check for an unallocated inode (i_mode == 0)
1796 * - check that the given client host has export rights and return
1797 *   those rights via. exflagsp and credanonp
1798 */
1799static int
1800ffs_fhtovp(mp, fhp, flags, vpp)
1801	struct mount *mp;
1802	struct fid *fhp;
1803	int flags;
1804	struct vnode **vpp;
1805{
1806	struct ufid *ufhp;
1807	struct fs *fs;
1808
1809	ufhp = (struct ufid *)fhp;
1810	fs = VFSTOUFS(mp)->um_fs;
1811	if (ufhp->ufid_ino < ROOTINO ||
1812	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1813		return (ESTALE);
1814	return (ufs_fhtovp(mp, ufhp, flags, vpp));
1815}
1816
1817/*
1818 * Initialize the filesystem.
1819 */
1820static int
1821ffs_init(vfsp)
1822	struct vfsconf *vfsp;
1823{
1824
1825	ffs_susp_initialize();
1826	softdep_initialize();
1827	return (ufs_init(vfsp));
1828}
1829
1830/*
1831 * Undo the work of ffs_init().
1832 */
1833static int
1834ffs_uninit(vfsp)
1835	struct vfsconf *vfsp;
1836{
1837	int ret;
1838
1839	ret = ufs_uninit(vfsp);
1840	softdep_uninitialize();
1841	ffs_susp_uninitialize();
1842	return (ret);
1843}
1844
1845/*
1846 * Write a superblock and associated information back to disk.
1847 */
1848int
1849ffs_sbupdate(ump, waitfor, suspended)
1850	struct ufsmount *ump;
1851	int waitfor;
1852	int suspended;
1853{
1854	struct fs *fs = ump->um_fs;
1855	struct buf *sbbp;
1856	struct buf *bp;
1857	int blks;
1858	void *space;
1859	int i, size, error, allerror = 0;
1860
1861	if (fs->fs_ronly == 1 &&
1862	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
1863	    (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
1864		panic("ffs_sbupdate: write read-only filesystem");
1865	/*
1866	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
1867	 */
1868	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
1869	    (int)fs->fs_sbsize, 0, 0, 0);
1870	/*
1871	 * First write back the summary information.
1872	 */
1873	blks = howmany(fs->fs_cssize, fs->fs_fsize);
1874	space = fs->fs_csp;
1875	for (i = 0; i < blks; i += fs->fs_frag) {
1876		size = fs->fs_bsize;
1877		if (i + fs->fs_frag > blks)
1878			size = (blks - i) * fs->fs_fsize;
1879		bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1880		    size, 0, 0, 0);
1881		bcopy(space, bp->b_data, (u_int)size);
1882		space = (char *)space + size;
1883		if (suspended)
1884			bp->b_flags |= B_VALIDSUSPWRT;
1885		if (waitfor != MNT_WAIT)
1886			bawrite(bp);
1887		else if ((error = bwrite(bp)) != 0)
1888			allerror = error;
1889	}
1890	/*
1891	 * Now write back the superblock itself. If any errors occurred
1892	 * up to this point, then fail so that the superblock avoids
1893	 * being written out as clean.
1894	 */
1895	if (allerror) {
1896		brelse(sbbp);
1897		return (allerror);
1898	}
1899	bp = sbbp;
1900	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
1901	    (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
1902		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
1903		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
1904		fs->fs_sblockloc = SBLOCK_UFS1;
1905	}
1906	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
1907	    (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
1908		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
1909		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
1910		fs->fs_sblockloc = SBLOCK_UFS2;
1911	}
1912	fs->fs_fmod = 0;
1913	fs->fs_time = time_second;
1914	if (MOUNTEDSOFTDEP(ump->um_mountp))
1915		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
1916	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1917	ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
1918	if (suspended)
1919		bp->b_flags |= B_VALIDSUSPWRT;
1920	if (waitfor != MNT_WAIT)
1921		bawrite(bp);
1922	else if ((error = bwrite(bp)) != 0)
1923		allerror = error;
1924	return (allerror);
1925}
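
/*
 * Typical call pattern (illustrative; compare ffs_unmount() and
 * ffs_sync()): a synchronous update with MNT_WAIT collects any write
 * error, while "suspended" is nonzero only when the filesystem is
 * suspended, so the writes are tagged B_VALIDSUSPWRT and pass the
 * check in ffs_geom_strategy() below.
 */
#if 0
	fs->fs_clean = 1;
	error = ffs_sbupdate(ump, MNT_WAIT, 0);
	if (error != 0)
		fs->fs_clean = 0;
#endif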
1926
1927static int
1928ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
1929	int attrnamespace, const char *attrname)
1930{
1931
1932#ifdef UFS_EXTATTR
1933	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
1934	    attrname));
1935#else
1936	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
1937	    attrname));
1938#endif
1939}
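
/*
 * With "options UFS_EXTATTR" in the kernel configuration the request
 * is handled by the UFS extended attribute code; without it the call
 * falls through to vfs_stdextattrctl(), which rejects the operation.
 */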
1940
1941static void
1942ffs_ifree(struct ufsmount *ump, struct inode *ip)
1943{
1944
1945	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
1946		uma_zfree(uma_ufs1, ip->i_din1);
1947	else if (ip->i_din2 != NULL)
1948		uma_zfree(uma_ufs2, ip->i_din2);
1949	uma_zfree(uma_inode, ip);
1950}
1951
1952static int dobkgrdwrite = 1;
1953SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
1954    "Do background writes (honoring the BV_BKGRDWRITE flag)?");
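
/*
 * The knob above is an ordinary read-write sysctl; for example, from
 * userland:
 *	sysctl debug.dobkgrdwrite	(query the current setting)
 *	sysctl debug.dobkgrdwrite=0	(disable the copy-and-write-later
 *					 optimization in ffs_bufwrite())
 */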
1955
1956/*
1957 * Complete a background write started from ffs_bufwrite().
1958 */
1959static void
1960ffs_backgroundwritedone(struct buf *bp)
1961{
1962	struct bufobj *bufobj;
1963	struct buf *origbp;
1964
1965	/*
1966	 * Find the original buffer that we are writing.
1967	 */
1968	bufobj = bp->b_bufobj;
1969	BO_LOCK(bufobj);
1970	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
1971		panic("backgroundwritedone: lost buffer");
1972	BO_UNLOCK(bufobj);
1973	/*
1974	 * Process dependencies, then move any unfinished ones to origbp.
1975	 */
1976	pbrelvp(bp);
1977	if (!LIST_EMPTY(&bp->b_dep))
1978		buf_complete(bp);
1979#ifdef SOFTUPDATES
1980	if (!LIST_EMPTY(&bp->b_dep))
1981		softdep_move_dependencies(bp, origbp);
1982#endif
1983	/*
1984	 * This buffer is marked B_NOCACHE so when it is released
1985	 * by bufdone() it will be tossed.
1986	 */
1987	bp->b_flags |= B_NOCACHE;
1988	bp->b_flags &= ~B_CACHE;
1989	bufdone(bp);
1990	BO_LOCK(bufobj);
1991	/*
1992	 * Clear the BV_BKGRDINPROG flag in the original buffer
1993	 * and awaken it if it is waiting for the write to complete.
1994	 * If BV_BKGRDINPROG is not set in the original buffer it must
1995	 * have been released and re-instantiated, which is not legal.
1996	 */
1997	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
1998	    ("backgroundwritedone: lost buffer2"));
1999	origbp->b_vflags &= ~BV_BKGRDINPROG;
2000	if (origbp->b_vflags & BV_BKGRDWAIT) {
2001		origbp->b_vflags &= ~BV_BKGRDWAIT;
2002		wakeup(&origbp->b_xflags);
2003	}
2004	BO_UNLOCK(bufobj);
2005}
2006
2007
2008/*
2009 * Write a buffer, releasing it on completion (done by iodone if
2010 * the write is asynchronous).  Do not bother writing anything if
2011 * the buffer is invalid.
2012 *
2013 * Note that we set B_CACHE here, indicating that the buffer is
2014 * fully valid and thus cacheable.  This is true even for NFS
2015 * now, so we set it generally.  It could equally well be set in
2016 * biodone() since the I/O is synchronous; we choose to set it
2017 * here.
2018 */
2019static int
2020ffs_bufwrite(struct buf *bp)
2021{
2022	struct buf *newbp;
2023
2024	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2025	if (bp->b_flags & B_INVAL) {
2026		brelse(bp);
2027		return (0);
2028	}
2029
2030	if (!BUF_ISLOCKED(bp))
2031		panic("bufwrite: buffer is not busy???");
2032	/*
2033	 * If a background write is already in progress, delay
2034	 * writing this block if it is asynchronous. Otherwise
2035	 * wait for the background write to complete.
2036	 */
2037	BO_LOCK(bp->b_bufobj);
2038	if (bp->b_vflags & BV_BKGRDINPROG) {
2039		if (bp->b_flags & B_ASYNC) {
2040			BO_UNLOCK(bp->b_bufobj);
2041			bdwrite(bp);
2042			return (0);
2043		}
2044		bp->b_vflags |= BV_BKGRDWAIT;
2045		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2046		    "bwrbg", 0);
2047		if (bp->b_vflags & BV_BKGRDINPROG)
2048			panic("bufwrite: still writing");
2049	}
2050	BO_UNLOCK(bp->b_bufobj);
2051
2052	/*
2053	 * If this buffer is marked for background writing and we
2054	 * do not have to wait for it, make a copy and write the
2055	 * copy so as to leave this buffer ready for further use.
2056	 *
2057	 * This optimization eats a lot of memory.  If we have a page
2058	 * or buffer shortfall we can't do it.
2059	 */
2060	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2061	    (bp->b_flags & B_ASYNC) &&
2062	    !vm_page_count_severe() &&
2063	    !buf_dirty_count_severe()) {
2064		KASSERT(bp->b_iodone == NULL,
2065		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2066
2067		/* get a new block */
2068		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2069		if (newbp == NULL)
2070			goto normal_write;
2071
2072		KASSERT((bp->b_flags & B_UNMAPPED) == 0, ("Unmapped cg"));
2073		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2074		BO_LOCK(bp->b_bufobj);
2075		bp->b_vflags |= BV_BKGRDINPROG;
2076		BO_UNLOCK(bp->b_bufobj);
2077		newbp->b_xflags |= BX_BKGRDMARKER;
2078		newbp->b_lblkno = bp->b_lblkno;
2079		newbp->b_blkno = bp->b_blkno;
2080		newbp->b_offset = bp->b_offset;
2081		newbp->b_iodone = ffs_backgroundwritedone;
2082		newbp->b_flags |= B_ASYNC;
2083		newbp->b_flags &= ~B_INVAL;
2084		pbgetvp(bp->b_vp, newbp);
2085
2086#ifdef SOFTUPDATES
2087		/*
2088		 * Move over the dependencies.  If there are rollbacks,
2089		 * leave the parent buffer dirtied as it will need to
2090		 * be written again.
2091		 */
2092		if (LIST_EMPTY(&bp->b_dep) ||
2093		    softdep_move_dependencies(bp, newbp) == 0)
2094			bundirty(bp);
2095#else
2096		bundirty(bp);
2097#endif
2098
2099		/*
2100		 * Initiate write on the copy, release the original.  The
2101		 * BKGRDINPROG flag prevents it from going away until
2102		 * the background write completes.
2103		 */
2104		bqrelse(bp);
2105		bp = newbp;
2106	} else
2107		/* Mark the buffer clean */
2108		bundirty(bp);
2109
2110
2111	/* Let the normal bufwrite do the rest for us */
2112normal_write:
2113	return (bufwrite(bp));
2114}
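
/*
 * Outline of the handshake between ffs_bufwrite() and
 * ffs_backgroundwritedone() above (illustrative only):
 *
 *	ffs_bufwrite(bp)		ffs_backgroundwritedone(newbp)
 *	----------------		------------------------------
 *	copy bp into newbp		find origbp with gbincore()
 *	set BV_BKGRDINPROG on bp	move leftover deps back to origbp
 *	bundirty(bp); bqrelse(bp)	clear BV_BKGRDINPROG on origbp
 *	bufwrite(newbp)			wakeup() any "bwrbg" sleeper
 *
 * While BV_BKGRDINPROG is set, a second write of the same block is
 * either deferred with bdwrite() (async) or sleeps on b_xflags (sync),
 * as coded at the top of ffs_bufwrite().
 */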
2115
2116
2117static void
2118ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2119{
2120	struct vnode *vp;
2121	int error;
2122	struct buf *tbp;
2123	int nocopy;
2124
2125	vp = bo->__bo_vnode;
2126	if (bp->b_iocmd == BIO_WRITE) {
2127		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2128		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2129		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2130			panic("ffs_geom_strategy: bad I/O");
2131		nocopy = bp->b_flags & B_NOCOPY;
2132		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2133		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2134		    vp->v_rdev->si_snapdata != NULL) {
2135			if ((bp->b_flags & B_CLUSTER) != 0) {
2136				runningbufwakeup(bp);
2137				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2138					      b_cluster.cluster_entry) {
2139					error = ffs_copyonwrite(vp, tbp);
2140					if (error != 0 &&
2141					    error != EOPNOTSUPP) {
2142						bp->b_error = error;
2143						bp->b_ioflags |= BIO_ERROR;
2144						bufdone(bp);
2145						return;
2146					}
2147				}
2148				bp->b_runningbufspace = bp->b_bufsize;
2149				atomic_add_long(&runningbufspace,
2150					       bp->b_runningbufspace);
2151			} else {
2152				error = ffs_copyonwrite(vp, bp);
2153				if (error != 0 && error != EOPNOTSUPP) {
2154					bp->b_error = error;
2155					bp->b_ioflags |= BIO_ERROR;
2156					bufdone(bp);
2157					return;
2158				}
2159			}
2160		}
2161#ifdef SOFTUPDATES
2162		if ((bp->b_flags & B_CLUSTER) != 0) {
2163			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2164				      b_cluster.cluster_entry) {
2165				if (!LIST_EMPTY(&tbp->b_dep))
2166					buf_start(tbp);
2167			}
2168		} else {
2169			if (!LIST_EMPTY(&bp->b_dep))
2170				buf_start(bp);
2171		}
2172
2173#endif
2174	}
2175	g_vfs_strategy(bo, bp);
2176}
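
/*
 * Note on the copy-on-write hook above: while snapshots exist on the
 * device (si_snapdata != NULL), every write is first offered to
 * ffs_copyonwrite() so that the old contents of any block claimed by
 * a snapshot are preserved before new data reaches the disk.  Buffers
 * already marked B_NOCOPY, as the snapshot code marks its own copies,
 * skip the hook, which avoids recursion.
 */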
2177
2178int
2179ffs_own_mount(const struct mount *mp)
2180{
2181
2182	if (mp->mnt_op == &ufs_vfsops)
2183		return (1);
2184	return (0);
2185}
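
/*
 * Sketch of a typical caller (illustrative): code handed an arbitrary
 * mount point verifies ownership before touching its private data.
 */
#if 0
	struct ufsmount *ump;

	if (ffs_own_mount(mp))
		ump = VFSTOUFS(mp);	/* mp->mnt_data really is a ufsmount */
	else
		ump = NULL;		/* some other filesystem type */
#endif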
2186
2187#ifdef	DDB
2188#ifdef SOFTUPDATES
2189
2190/* defined in ffs_softdep.c */
2191extern void db_print_ffs(struct ufsmount *ump);
2192
2193DB_SHOW_COMMAND(ffs, db_show_ffs)
2194{
2195	struct mount *mp;
2196	struct ufsmount *ump;
2197
2198	if (have_addr) {
2199		ump = VFSTOUFS((struct mount *)addr);
2200		db_print_ffs(ump);
2201		return;
2202	}
2203
2204	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2205		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2206			db_print_ffs(VFSTOUFS(mp));
2207	}
2208}
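
/*
 * Example DDB usage (illustrative), at the "db>" prompt:
 *	show ffs		print every mounted UFS/FFS filesystem
 *	show ffs <addr>		print only the struct mount at <addr>
 */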
2209
2210#endif	/* SOFTUPDATES */
2211#endif	/* DDB */
2212