1/*-
2 * Copyright (c) 1999-2004 Poul-Henning Kamp
3 * Copyright (c) 1999 Michael Smith
4 * Copyright (c) 1989, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: stable/11/sys/kern/vfs_mount.c 369560 2021-04-06 19:22:31Z git2svn $");
39
40#include <sys/param.h>
41#include <sys/conf.h>
42#include <sys/fcntl.h>
43#include <sys/jail.h>
44#include <sys/kernel.h>
45#include <sys/libkern.h>
46#include <sys/malloc.h>
47#include <sys/mount.h>
48#include <sys/mutex.h>
49#include <sys/namei.h>
50#include <sys/priv.h>
51#include <sys/proc.h>
52#include <sys/filedesc.h>
53#include <sys/reboot.h>
54#include <sys/sbuf.h>
55#include <sys/syscallsubr.h>
56#include <sys/sysproto.h>
57#include <sys/sx.h>
58#include <sys/sysctl.h>
59#include <sys/sysent.h>
60#include <sys/systm.h>
61#include <sys/vnode.h>
62#include <vm/uma.h>
63
64#include <geom/geom.h>
65
66#include <machine/stdarg.h>
67
68#include <security/audit/audit.h>
69#include <security/mac/mac_framework.h>
70
71#define	VFS_MOUNTARG_SIZE_MAX	(1024 * 64)
72
73static int	vfs_domount(struct thread *td, const char *fstype, char *fspath,
74		    uint64_t fsflags, struct vfsoptlist **optlist);
75static void	free_mntarg(struct mntarg *ma);
76
77static int	usermount = 0;
78SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
79    "Unprivileged users may mount and unmount file systems");
80
81static bool	default_autoro = false;
82SYSCTL_BOOL(_vfs, OID_AUTO, default_autoro, CTLFLAG_RW, &default_autoro, 0,
83    "Retry failed r/w mount as r/o if no explicit ro/rw option is specified");
84
85MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
86MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure");
87static uma_zone_t mount_zone;
88
89/* List of mounted filesystems. */
90struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
91
92/* Mutex for any iteration/modification of mountlist. */
93struct mtx mountlist_mtx;
94MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF);
95
96/*
97 * Global opts, taken by all filesystems
98 */
99static const char *global_opts[] = {
100	"errmsg",
101	"fstype",
102	"fspath",
103	"ro",
104	"rw",
105	"nosuid",
106	"noexec",
107	NULL
108};
109
110static int
111mount_init(void *mem, int size, int flags)
112{
113	struct mount *mp;
114
115	mp = (struct mount *)mem;
116	mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
117	lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
118	return (0);
119}
120
121static void
122mount_fini(void *mem, int size)
123{
124	struct mount *mp;
125
126	mp = (struct mount *)mem;
127	lockdestroy(&mp->mnt_explock);
128	mtx_destroy(&mp->mnt_mtx);
129}
130
131static void
132vfs_mount_init(void *dummy __unused)
133{
134
135	mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
136	    NULL, mount_init, mount_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
137}
138SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL);
139
140/*
141 * ---------------------------------------------------------------------
142 * Functions for building and sanitizing the mount options
143 */
144
145/* Remove one mount option. */
146static void
147vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
148{
149
150	TAILQ_REMOVE(opts, opt, link);
151	free(opt->name, M_MOUNT);
152	if (opt->value != NULL)
153		free(opt->value, M_MOUNT);
154	free(opt, M_MOUNT);
155}
156
157/* Release all resources related to the mount options. */
158void
159vfs_freeopts(struct vfsoptlist *opts)
160{
161	struct vfsopt *opt;
162
163	while (!TAILQ_EMPTY(opts)) {
164		opt = TAILQ_FIRST(opts);
165		vfs_freeopt(opts, opt);
166	}
167	free(opts, M_MOUNT);
168}
169
170void
171vfs_deleteopt(struct vfsoptlist *opts, const char *name)
172{
173	struct vfsopt *opt, *temp;
174
175	if (opts == NULL)
176		return;
177	TAILQ_FOREACH_SAFE(opt, opts, link, temp)  {
178		if (strcmp(opt->name, name) == 0)
179			vfs_freeopt(opts, opt);
180	}
181}
182
183static int
184vfs_isopt_ro(const char *opt)
185{
186
187	if (strcmp(opt, "ro") == 0 || strcmp(opt, "rdonly") == 0 ||
188	    strcmp(opt, "norw") == 0)
189		return (1);
190	return (0);
191}
192
193static int
194vfs_isopt_rw(const char *opt)
195{
196
197	if (strcmp(opt, "rw") == 0 || strcmp(opt, "noro") == 0)
198		return (1);
199	return (0);
200}
201
202/*
203 * Check if options are equal (with or without the "no" prefix).
204 */
205static int
206vfs_equalopts(const char *opt1, const char *opt2)
207{
208	char *p;
209
210	/* "opt" vs. "opt" or "noopt" vs. "noopt" */
211	if (strcmp(opt1, opt2) == 0)
212		return (1);
213	/* "noopt" vs. "opt" */
214	if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
215		return (1);
216	/* "opt" vs. "noopt" */
217	if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
218		return (1);
219	while ((p = strchr(opt1, '.')) != NULL &&
220	    !strncmp(opt1, opt2, ++p - opt1)) {
221		opt2 += p - opt1;
222		opt1 = p;
223		/* "foo.noopt" vs. "foo.opt" */
224		if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
225			return (1);
226		/* "foo.opt" vs. "foo.noopt" */
227		if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
228			return (1);
229	}
230	/* "ro" / "rdonly" / "norw" / "rw" / "noro" */
231	if ((vfs_isopt_ro(opt1) || vfs_isopt_rw(opt1)) &&
232	    (vfs_isopt_ro(opt2) || vfs_isopt_rw(opt2)))
233		return (1);
234	return (0);
235}
236
237/*
238 * If a mount option is specified several times
239 * (with or without the "no" prefix), only keep
240 * the last occurrence of it.
241 */
242static void
243vfs_sanitizeopts(struct vfsoptlist *opts)
244{
245	struct vfsopt *opt, *opt2, *tmp;
246
247	TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
248		opt2 = TAILQ_PREV(opt, vfsoptlist, link);
249		while (opt2 != NULL) {
250			if (vfs_equalopts(opt->name, opt2->name)) {
251				tmp = TAILQ_PREV(opt2, vfsoptlist, link);
252				vfs_freeopt(opts, opt2);
253				opt2 = tmp;
254			} else {
255				opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
256			}
257		}
258	}
259}
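
/*
 * Illustrative note (not from the original source): because later options
 * win and "no"-prefixed names compare equal to their bare counterparts, a
 * list built from "ro", then "noatime", then "noro" is reduced by
 * vfs_sanitizeopts() to just "noatime" and "noro".
 */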
260
261/*
262 * Build a linked list of mount options from a struct uio.
263 */
264int
265vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
266{
267	struct vfsoptlist *opts;
268	struct vfsopt *opt;
269	size_t memused, namelen, optlen;
270	unsigned int i, iovcnt;
271	int error;
272
273	opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK);
274	TAILQ_INIT(opts);
275	memused = 0;
276	iovcnt = auio->uio_iovcnt;
277	for (i = 0; i < iovcnt; i += 2) {
278		namelen = auio->uio_iov[i].iov_len;
279		optlen = auio->uio_iov[i + 1].iov_len;
280		memused += sizeof(struct vfsopt) + optlen + namelen;
281		/*
282		 * Avoid consuming too much memory, and guard against
283		 * attempts to overflow memused.
284		 */
285		if (memused > VFS_MOUNTARG_SIZE_MAX ||
286		    optlen > VFS_MOUNTARG_SIZE_MAX ||
287		    namelen > VFS_MOUNTARG_SIZE_MAX) {
288			error = EINVAL;
289			goto bad;
290		}
291
292		opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
293		opt->name = malloc(namelen, M_MOUNT, M_WAITOK);
294		opt->value = NULL;
295		opt->len = 0;
296		opt->pos = i / 2;
297		opt->seen = 0;
298
299		/*
300		 * Do this early, so jumps to "bad" will free the current
301		 * option.
302		 */
303		TAILQ_INSERT_TAIL(opts, opt, link);
304
305		if (auio->uio_segflg == UIO_SYSSPACE) {
306			bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
307		} else {
308			error = copyin(auio->uio_iov[i].iov_base, opt->name,
309			    namelen);
310			if (error)
311				goto bad;
312		}
313		/* Ensure names are null-terminated strings. */
314		if (namelen == 0 || opt->name[namelen - 1] != '\0') {
315			error = EINVAL;
316			goto bad;
317		}
318		if (optlen != 0) {
319			opt->len = optlen;
320			opt->value = malloc(optlen, M_MOUNT, M_WAITOK);
321			if (auio->uio_segflg == UIO_SYSSPACE) {
322				bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
323				    optlen);
324			} else {
325				error = copyin(auio->uio_iov[i + 1].iov_base,
326				    opt->value, optlen);
327				if (error)
328					goto bad;
329			}
330		}
331	}
332	vfs_sanitizeopts(opts);
333	*options = opts;
334	return (0);
335bad:
336	vfs_freeopts(opts);
337	return (error);
338}
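
/*
 * Illustrative note (not from the original source): the uio is interpreted
 * as name/value pairs, so iov[0]/iov[1] become the first option's name and
 * value, iov[2]/iov[3] the second's, and so on.  Every name must be a
 * NUL-terminated string; a zero-length value leaves opt->value as NULL.
 */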
339
340/*
341 * Merge the old mount options with the new ones passed
342 * in the MNT_UPDATE case.
343 *
344 * XXX: This function will keep a "nofoo" option in the new
345 * options.  E.g., if the option's canonical name is "foo",
346 * "nofoo" ends up in the mount point's active options.
347 */
348static void
349vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
350{
351	struct vfsopt *opt, *new;
352
353	TAILQ_FOREACH(opt, oldopts, link) {
354		new = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
355		new->name = strdup(opt->name, M_MOUNT);
356		if (opt->len != 0) {
357			new->value = malloc(opt->len, M_MOUNT, M_WAITOK);
358			bcopy(opt->value, new->value, opt->len);
359		} else
360			new->value = NULL;
361		new->len = opt->len;
362		new->seen = opt->seen;
363		TAILQ_INSERT_HEAD(toopts, new, link);
364	}
365	vfs_sanitizeopts(toopts);
366}
367
368/*
369 * Mount a filesystem.
370 */
371#ifndef _SYS_SYSPROTO_H_
372struct nmount_args {
373	struct iovec *iovp;
374	unsigned int iovcnt;
375	int flags;
376};
377#endif
378int
379sys_nmount(struct thread *td, struct nmount_args *uap)
380{
381	struct uio *auio;
382	int error;
383	u_int iovcnt;
384	uint64_t flags;
385
386	/*
387	 * Mount flags are now 64 bits. On 32-bit architectures only
388	 * 32 bits are passed in, but from here on everything handles
389	 * 64-bit flags correctly.
390	 */
391	flags = uap->flags;
392
393	AUDIT_ARG_FFLAGS(flags);
394	CTR4(KTR_VFS, "%s: iovp %p with iovcnt %d and flags %d", __func__,
395	    uap->iovp, uap->iovcnt, flags);
396
397	/*
398	 * Filter out MNT_ROOTFS.  We do not want clients of nmount() in
399	 * userspace to set this flag, but we must filter it out if we want
400	 * MNT_UPDATE on the root file system to work.
401	 * MNT_ROOTFS should only be set by the kernel when mounting its
402	 * root file system.
403	 */
404	flags &= ~MNT_ROOTFS;
405
406	iovcnt = uap->iovcnt;
407	/*
408	 * Check that we have an even number of iovecs
409	 * and that we have at least two options.
410	 */
411	if ((iovcnt & 1) || (iovcnt < 4)) {
412		CTR2(KTR_VFS, "%s: failed for invalid iovcnt %d", __func__,
413		    uap->iovcnt);
414		return (EINVAL);
415	}
416
417	error = copyinuio(uap->iovp, iovcnt, &auio);
418	if (error) {
419		CTR2(KTR_VFS, "%s: failed for invalid uio op with %d errno",
420		    __func__, error);
421		return (error);
422	}
423	error = vfs_donmount(td, flags, auio);
424
425	free(auio, M_IOV);
426	return (error);
427}
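
/*
 * Illustrative userland sketch (not part of this file; device path and
 * mount point are hypothetical): nmount(2) takes an array of iovecs holding
 * NUL-terminated option names alternating with (possibly empty) values,
 * casts omitted for brevity:
 *
 *	struct iovec iov[] = {
 *		{ "fstype", sizeof("fstype") },	{ "ufs", sizeof("ufs") },
 *		{ "fspath", sizeof("fspath") },	{ "/mnt", sizeof("/mnt") },
 *		{ "from", sizeof("from") },	{ "/dev/ada0p2", sizeof("/dev/ada0p2") },
 *		{ "ro", sizeof("ro") },		{ NULL, 0 },
 *	};
 *
 *	if (nmount(iov, nitems(iov), 0) == -1)
 *		err(1, "nmount");
 */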
428
429/*
430 * ---------------------------------------------------------------------
431 * Various utility functions
432 */
433
434void
435vfs_ref(struct mount *mp)
436{
437
438	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
439	MNT_ILOCK(mp);
440	MNT_REF(mp);
441	MNT_IUNLOCK(mp);
442}
443
444void
445vfs_rel(struct mount *mp)
446{
447
448	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
449	MNT_ILOCK(mp);
450	MNT_REL(mp);
451	MNT_IUNLOCK(mp);
452}
453
454/*
455 * Allocate and initialize the mount point struct.
456 */
457struct mount *
458vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
459    struct ucred *cred)
460{
461	struct mount *mp;
462
463	mp = uma_zalloc(mount_zone, M_WAITOK);
464	bzero(&mp->mnt_startzero,
465	    __rangeof(struct mount, mnt_startzero, mnt_endzero));
466	TAILQ_INIT(&mp->mnt_nvnodelist);
467	mp->mnt_nvnodelistsize = 0;
468	TAILQ_INIT(&mp->mnt_activevnodelist);
469	mp->mnt_activevnodelistsize = 0;
470	mp->mnt_ref = 0;
471	(void) vfs_busy(mp, MBF_NOWAIT);
472	atomic_add_acq_int(&vfsp->vfc_refcount, 1);
473	mp->mnt_op = vfsp->vfc_vfsops;
474	mp->mnt_vfc = vfsp;
475	mp->mnt_stat.f_type = vfsp->vfc_typenum;
476	mp->mnt_gen++;
477	strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
478	mp->mnt_vnodecovered = vp;
479	mp->mnt_cred = crdup(cred);
480	mp->mnt_stat.f_owner = cred->cr_uid;
481	strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
482	mp->mnt_iosize_max = DFLTPHYS;
483#ifdef MAC
484	mac_mount_init(mp);
485	mac_mount_create(cred, mp);
486#endif
487	arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
488	TAILQ_INIT(&mp->mnt_uppers);
489	return (mp);
490}
491
492/*
493 * Destroy the mount struct previously allocated by vfs_mount_alloc().
494 */
495void
496vfs_mount_destroy(struct mount *mp)
497{
498
499	MNT_ILOCK(mp);
500	mp->mnt_kern_flag |= MNTK_REFEXPIRE;
501	if (mp->mnt_kern_flag & MNTK_MWAIT) {
502		mp->mnt_kern_flag &= ~MNTK_MWAIT;
503		wakeup(mp);
504	}
505	while (mp->mnt_ref)
506		msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
507	KASSERT(mp->mnt_ref == 0,
508	    ("%s: invalid refcount in the drain path @ %s:%d", __func__,
509	    __FILE__, __LINE__));
510	if (mp->mnt_writeopcount != 0)
511		panic("vfs_mount_destroy: nonzero writeopcount");
512	if (mp->mnt_secondary_writes != 0)
513		panic("vfs_mount_destroy: nonzero secondary_writes");
514	atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
515	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
516		struct vnode *vp;
517
518		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
519			vn_printf(vp, "dangling vnode ");
520		panic("unmount: dangling vnode");
521	}
522	KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers"));
523	if (mp->mnt_nvnodelistsize != 0)
524		panic("vfs_mount_destroy: nonzero nvnodelistsize");
525	if (mp->mnt_activevnodelistsize != 0)
526		panic("vfs_mount_destroy: nonzero activevnodelistsize");
527	if (mp->mnt_lockref != 0)
528		panic("vfs_mount_destroy: nonzero lock refcount");
529	MNT_IUNLOCK(mp);
530	if (mp->mnt_vnodecovered != NULL)
531		vrele(mp->mnt_vnodecovered);
532#ifdef MAC
533	mac_mount_destroy(mp);
534#endif
535	if (mp->mnt_opt != NULL)
536		vfs_freeopts(mp->mnt_opt);
537	crfree(mp->mnt_cred);
538	uma_zfree(mount_zone, mp);
539}
540
541static bool
542vfs_should_downgrade_to_ro_mount(uint64_t fsflags, int error)
543{
544	/* This is an upgrade of an existing mount. */
545	if ((fsflags & MNT_UPDATE) != 0)
546		return (false);
547	/* This is already an R/O mount. */
548	if ((fsflags & MNT_RDONLY) != 0)
549		return (false);
550
551	switch (error) {
552	case ENODEV:	/* generic, geom, ... */
553	case EACCES:	/* cam/scsi, ... */
554	case EROFS:	/* md, mmcsd, ... */
555		/*
556		 * These errors can be returned by the storage layer to signal
557		 * that the media is read-only.  No harm in the R/O mount
558		 * attempt if the error was returned for some other reason.
559		 */
560		return (true);
561	default:
562		return (false);
563	}
564}
565
566int
567vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions)
568{
569	struct vfsoptlist *optlist;
570	struct vfsopt *opt, *tmp_opt;
571	char *fstype, *fspath, *errmsg;
572	int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;
573	bool autoro;
574
575	errmsg = fspath = NULL;
576	errmsg_len = fspathlen = 0;
577	errmsg_pos = -1;
578	autoro = default_autoro;
579
580	error = vfs_buildopts(fsoptions, &optlist);
581	if (error)
582		return (error);
583
584	if (vfs_getopt(optlist, "errmsg", (void **)&errmsg, &errmsg_len) == 0)
585		errmsg_pos = vfs_getopt_pos(optlist, "errmsg");
586
587	/*
588	 * We need these two options before the others,
589	 * and they are mandatory for any filesystem.
590	 * Ensure they are NUL terminated as well.
591	 */
592	fstypelen = 0;
593	error = vfs_getopt(optlist, "fstype", (void **)&fstype, &fstypelen);
594	if (error || fstypelen <= 0 || fstype[fstypelen - 1] != '\0') {
595		error = EINVAL;
596		if (errmsg != NULL)
597			strncpy(errmsg, "Invalid fstype", errmsg_len);
598		goto bail;
599	}
600	fspathlen = 0;
601	error = vfs_getopt(optlist, "fspath", (void **)&fspath, &fspathlen);
602	if (error || fspathlen <= 0 || fspath[fspathlen - 1] != '\0') {
603		error = EINVAL;
604		if (errmsg != NULL)
605			strncpy(errmsg, "Invalid fspath", errmsg_len);
606		goto bail;
607	}
608
609	/*
610	 * We need to see if we have the "update" option
611	 * before we call vfs_domount(), since vfs_domount() has special
612	 * logic based on MNT_UPDATE.  This is very important
613	 * when we want to update the root filesystem.
614	 */
615	TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
616		if (strcmp(opt->name, "update") == 0) {
617			fsflags |= MNT_UPDATE;
618			vfs_freeopt(optlist, opt);
619		}
620		else if (strcmp(opt->name, "async") == 0)
621			fsflags |= MNT_ASYNC;
622		else if (strcmp(opt->name, "force") == 0) {
623			fsflags |= MNT_FORCE;
624			vfs_freeopt(optlist, opt);
625		}
626		else if (strcmp(opt->name, "reload") == 0) {
627			fsflags |= MNT_RELOAD;
628			vfs_freeopt(optlist, opt);
629		}
630		else if (strcmp(opt->name, "multilabel") == 0)
631			fsflags |= MNT_MULTILABEL;
632		else if (strcmp(opt->name, "noasync") == 0)
633			fsflags &= ~MNT_ASYNC;
634		else if (strcmp(opt->name, "noatime") == 0)
635			fsflags |= MNT_NOATIME;
636		else if (strcmp(opt->name, "atime") == 0) {
637			free(opt->name, M_MOUNT);
638			opt->name = strdup("nonoatime", M_MOUNT);
639		}
640		else if (strcmp(opt->name, "noclusterr") == 0)
641			fsflags |= MNT_NOCLUSTERR;
642		else if (strcmp(opt->name, "clusterr") == 0) {
643			free(opt->name, M_MOUNT);
644			opt->name = strdup("nonoclusterr", M_MOUNT);
645		}
646		else if (strcmp(opt->name, "noclusterw") == 0)
647			fsflags |= MNT_NOCLUSTERW;
648		else if (strcmp(opt->name, "clusterw") == 0) {
649			free(opt->name, M_MOUNT);
650			opt->name = strdup("nonoclusterw", M_MOUNT);
651		}
652		else if (strcmp(opt->name, "noexec") == 0)
653			fsflags |= MNT_NOEXEC;
654		else if (strcmp(opt->name, "exec") == 0) {
655			free(opt->name, M_MOUNT);
656			opt->name = strdup("nonoexec", M_MOUNT);
657		}
658		else if (strcmp(opt->name, "nosuid") == 0)
659			fsflags |= MNT_NOSUID;
660		else if (strcmp(opt->name, "suid") == 0) {
661			free(opt->name, M_MOUNT);
662			opt->name = strdup("nonosuid", M_MOUNT);
663		}
664		else if (strcmp(opt->name, "nosymfollow") == 0)
665			fsflags |= MNT_NOSYMFOLLOW;
666		else if (strcmp(opt->name, "symfollow") == 0) {
667			free(opt->name, M_MOUNT);
668			opt->name = strdup("nonosymfollow", M_MOUNT);
669		}
670		else if (strcmp(opt->name, "noro") == 0) {
671			fsflags &= ~MNT_RDONLY;
672			autoro = false;
673		}
674		else if (strcmp(opt->name, "rw") == 0) {
675			fsflags &= ~MNT_RDONLY;
676			autoro = false;
677		}
678		else if (strcmp(opt->name, "ro") == 0) {
679			fsflags |= MNT_RDONLY;
680			autoro = false;
681		}
682		else if (strcmp(opt->name, "rdonly") == 0) {
683			free(opt->name, M_MOUNT);
684			opt->name = strdup("ro", M_MOUNT);
685			fsflags |= MNT_RDONLY;
686			autoro = false;
687		}
688		else if (strcmp(opt->name, "autoro") == 0) {
689			vfs_freeopt(optlist, opt);
690			autoro = true;
691		}
692		else if (strcmp(opt->name, "suiddir") == 0)
693			fsflags |= MNT_SUIDDIR;
694		else if (strcmp(opt->name, "sync") == 0)
695			fsflags |= MNT_SYNCHRONOUS;
696		else if (strcmp(opt->name, "union") == 0)
697			fsflags |= MNT_UNION;
698		else if (strcmp(opt->name, "automounted") == 0) {
699			fsflags |= MNT_AUTOMOUNTED;
700			vfs_freeopt(optlist, opt);
701		}
702	}
703
704	/*
705	 * Be ultra-paranoid about making sure the type and fspath
706	 * variables will fit in our mp buffers, including the
707	 * terminating NUL.
708	 */
709	if (fstypelen > MFSNAMELEN || fspathlen > MNAMELEN) {
710		error = ENAMETOOLONG;
711		goto bail;
712	}
713
714	error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
715
716	/*
717	 * See whether we can mount read-only if the error code suggests
718	 * that this could succeed and the mount options allow for it.
719	 * Never try it if "[no]{ro|rw}" has been explicitly requested and not
720	 * overridden by "autoro".
721	 */
722	if (autoro && vfs_should_downgrade_to_ro_mount(fsflags, error)) {
723		printf("%s: R/W mount failed, possibly R/O media,"
724		    " trying R/O mount\n", __func__);
725		fsflags |= MNT_RDONLY;
726		error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
727	}
728bail:
729	/* copyout the errmsg */
730	if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
731	    && errmsg_len > 0 && errmsg != NULL) {
732		if (fsoptions->uio_segflg == UIO_SYSSPACE) {
733			bcopy(errmsg,
734			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
735			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
736		} else {
737			copyout(errmsg,
738			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
739			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
740		}
741	}
742
743	if (optlist != NULL)
744		vfs_freeopts(optlist);
745	return (error);
746}
747
748/*
749 * Old mount API.
750 */
751#ifndef _SYS_SYSPROTO_H_
752struct mount_args {
753	char	*type;
754	char	*path;
755	int	flags;
756	caddr_t	data;
757};
758#endif
759/* ARGSUSED */
760int
761sys_mount(struct thread *td, struct mount_args *uap)
762{
763	char *fstype;
764	struct vfsconf *vfsp = NULL;
765	struct mntarg *ma = NULL;
766	uint64_t flags;
767	int error;
768
769	/*
770	 * Mount flags are now 64 bits. On 32-bit architectures only
771	 * 32 bits are passed in, but from here on everything handles
772	 * 64-bit flags correctly.
773	 */
774	flags = uap->flags;
775
776	AUDIT_ARG_FFLAGS(flags);
777
778	/*
779	 * Filter out MNT_ROOTFS.  We do not want clients of mount() in
780	 * userspace to set this flag, but we must filter it out if we want
781	 * MNT_UPDATE on the root file system to work.
782	 * MNT_ROOTFS should only be set by the kernel when mounting its
783	 * root file system.
784	 */
785	flags &= ~MNT_ROOTFS;
786
787	fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
788	error = copyinstr(uap->type, fstype, MFSNAMELEN, NULL);
789	if (error) {
790		free(fstype, M_TEMP);
791		return (error);
792	}
793
794	AUDIT_ARG_TEXT(fstype);
795	vfsp = vfs_byname_kld(fstype, td, &error);
796	free(fstype, M_TEMP);
797	if (vfsp == NULL)
798		return (ENOENT);
799	if (vfsp->vfc_vfsops->vfs_cmount == NULL)
800		return (EOPNOTSUPP);
801
802	ma = mount_argsu(ma, "fstype", uap->type, MFSNAMELEN);
803	ma = mount_argsu(ma, "fspath", uap->path, MNAMELEN);
804	ma = mount_argb(ma, flags & MNT_RDONLY, "noro");
805	ma = mount_argb(ma, !(flags & MNT_NOSUID), "nosuid");
806	ma = mount_argb(ma, !(flags & MNT_NOEXEC), "noexec");
807
808	error = vfsp->vfc_vfsops->vfs_cmount(ma, uap->data, flags);
809	return (error);
810}
811
812/*
813 * vfs_domount_first(): first file system mount (not update)
814 */
815static int
816vfs_domount_first(
817	struct thread *td,		/* Calling thread. */
818	struct vfsconf *vfsp,		/* File system type. */
819	char *fspath,			/* Mount path. */
820	struct vnode *vp,		/* Vnode to be covered. */
821	uint64_t fsflags,		/* Flags common to all filesystems. */
822	struct vfsoptlist **optlist	/* Options local to the filesystem. */
823	)
824{
825	struct vattr va;
826	struct mount *mp;
827	struct vnode *newdp;
828	int error, error1;
829
830	ASSERT_VOP_ELOCKED(vp, __func__);
831	KASSERT((fsflags & MNT_UPDATE) == 0, ("MNT_UPDATE shouldn't be here"));
832
833	if (vp == td->td_ucred->cr_prison->pr_root) {
834		vput(vp);
835		return (EPERM);
836	}
837
838	/*
839	 * If the user is not root, ensure that they own the directory
840	 * onto which we are attempting to mount.
841	 */
842	error = VOP_GETATTR(vp, &va, td->td_ucred);
843	if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
844		error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN, 0);
845	if (error == 0)
846		error = vinvalbuf(vp, V_SAVE, 0, 0);
847	if (error == 0 && vp->v_type != VDIR)
848		error = ENOTDIR;
849	if (error == 0) {
850		VI_LOCK(vp);
851		if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
852			vp->v_iflag |= VI_MOUNT;
853		else
854			error = EBUSY;
855		VI_UNLOCK(vp);
856	}
857	if (error != 0) {
858		vput(vp);
859		return (error);
860	}
861	VOP_UNLOCK(vp, 0);
862
863	/* Allocate and initialize the filesystem. */
864	mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
865	/* XXXMAC: pass to vfs_mount_alloc? */
866	mp->mnt_optnew = *optlist;
867	/* Set the mount level flags. */
868	mp->mnt_flag = (fsflags & (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY));
869
870	/*
871	 * Mount the filesystem.
872	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
873	 * get.  No freeing of cn_pnbuf.
874	 */
875	error1 = 0;
876	if ((error = VFS_MOUNT(mp)) != 0 ||
877	    (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 ||
878	    (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) {
879		if (error1 != 0) {
880			error = error1;
881			if ((error1 = VFS_UNMOUNT(mp, 0)) != 0)
882				printf("VFS_UNMOUNT returned %d\n", error1);
883		}
884		vfs_unbusy(mp);
885		mp->mnt_vnodecovered = NULL;
886		vfs_mount_destroy(mp);
887		VI_LOCK(vp);
888		vp->v_iflag &= ~VI_MOUNT;
889		VI_UNLOCK(vp);
890		vrele(vp);
891		return (error);
892	}
893	VOP_UNLOCK(newdp, 0);
894
895	if (mp->mnt_opt != NULL)
896		vfs_freeopts(mp->mnt_opt);
897	mp->mnt_opt = mp->mnt_optnew;
898	*optlist = NULL;
899
900	/*
901	 * Prevent external consumers of mount options from reading mnt_optnew.
902	 */
903	mp->mnt_optnew = NULL;
904
905	MNT_ILOCK(mp);
906	if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
907	    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
908		mp->mnt_kern_flag |= MNTK_ASYNC;
909	else
910		mp->mnt_kern_flag &= ~MNTK_ASYNC;
911	MNT_IUNLOCK(mp);
912
913	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
914	cache_purge(vp);
915	VI_LOCK(vp);
916	vp->v_iflag &= ~VI_MOUNT;
917	VI_UNLOCK(vp);
918	vp->v_mountedhere = mp;
919	/* Place the new filesystem at the end of the mount list. */
920	mtx_lock(&mountlist_mtx);
921	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
922	mtx_unlock(&mountlist_mtx);
923	vfs_event_signal(NULL, VQ_MOUNT, 0);
924	vn_lock(newdp, LK_EXCLUSIVE | LK_RETRY);
925	VOP_UNLOCK(vp, 0);
926	EVENTHANDLER_INVOKE(vfs_mounted, mp, newdp, td);
927	VOP_UNLOCK(newdp, 0);
928	mountcheckdirs(vp, newdp);
929	vrele(newdp);
930	if ((mp->mnt_flag & MNT_RDONLY) == 0)
931		vfs_allocate_syncvnode(mp);
932	vfs_unbusy(mp);
933	return (0);
934}
935
936/*
937 * vfs_domount_update(): update of mounted file system
938 */
939static int
940vfs_domount_update(
941	struct thread *td,		/* Calling thread. */
942	struct vnode *vp,		/* Mount point vnode. */
943	uint64_t fsflags,		/* Flags common to all filesystems. */
944	struct vfsoptlist **optlist	/* Options local to the filesystem. */
945	)
946{
947	struct export_args export;
948	void *bufp;
949	struct mount *mp;
950	int error, export_error, len;
951	uint64_t flag;
952
953	ASSERT_VOP_ELOCKED(vp, __func__);
954	KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));
955	mp = vp->v_mount;
956
957	if ((vp->v_vflag & VV_ROOT) == 0) {
958		if (vfs_copyopt(*optlist, "export", &export, sizeof(export))
959		    == 0)
960			error = EXDEV;
961		else
962			error = EINVAL;
963		vput(vp);
964		return (error);
965	}
966
967	/*
968	 * We only allow the filesystem to be reloaded if it
969	 * is currently mounted read-only.
970	 */
971	flag = mp->mnt_flag;
972	if ((fsflags & MNT_RELOAD) != 0 && (flag & MNT_RDONLY) == 0) {
973		vput(vp);
974		return (EOPNOTSUPP);	/* Needs translation */
975	}
976	/*
977	 * Only privileged root, or (if MNT_USER is set) the user that
978	 * did the original mount is permitted to update it.
979	 */
980	error = vfs_suser(mp, td);
981	if (error != 0) {
982		vput(vp);
983		return (error);
984	}
985	if (vfs_busy(mp, MBF_NOWAIT)) {
986		vput(vp);
987		return (EBUSY);
988	}
989	VI_LOCK(vp);
990	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
991		VI_UNLOCK(vp);
992		vfs_unbusy(mp);
993		vput(vp);
994		return (EBUSY);
995	}
996	vp->v_iflag |= VI_MOUNT;
997	VI_UNLOCK(vp);
998	VOP_UNLOCK(vp, 0);
999
1000	MNT_ILOCK(mp);
1001	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
1002		MNT_IUNLOCK(mp);
1003		error = EBUSY;
1004		goto end;
1005	}
1006	mp->mnt_flag &= ~MNT_UPDATEMASK;
1007	mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
1008	    MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
1009	if ((mp->mnt_flag & MNT_ASYNC) == 0)
1010		mp->mnt_kern_flag &= ~MNTK_ASYNC;
1011	MNT_IUNLOCK(mp);
1012	mp->mnt_optnew = *optlist;
1013	vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);
1014
1015	/*
1016	 * Mount the filesystem.
1017	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
1018	 * get.  No freeing of cn_pnbuf.
1019	 */
1020	error = VFS_MOUNT(mp);
1021
1022	export_error = 0;
1023	/* Process the export option. */
1024	if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp,
1025	    &len) == 0) {
1026		/* Assume that there is only 1 ABI for each length. */
1027		switch (len) {
1028		case (sizeof(struct oexport_args)):
1029			bzero(&export, sizeof(export));
1030			/* FALLTHROUGH */
1031		case (sizeof(export)):
1032			bcopy(bufp, &export, len);
1033			export_error = vfs_export(mp, &export);
1034			break;
1035		default:
1036			export_error = EINVAL;
1037			break;
1038		}
1039	}
1040
1041	MNT_ILOCK(mp);
1042	if (error == 0) {
1043		mp->mnt_flag &=	~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
1044		    MNT_SNAPSHOT);
1045	} else {
1046		/*
1047		 * If we fail, restore old mount flags. MNT_QUOTA is special,
1048		 * because it is not part of MNT_UPDATEMASK, but it could have
1049		 * changed in the meantime if quotactl(2) was called.
1050		 * All in all, we want the current value of MNT_QUOTA, not
1051		 * the old one.
1052		 */
1053		mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
1054	}
1055	if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1056	    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1057		mp->mnt_kern_flag |= MNTK_ASYNC;
1058	else
1059		mp->mnt_kern_flag &= ~MNTK_ASYNC;
1060	MNT_IUNLOCK(mp);
1061
1062	if (error != 0)
1063		goto end;
1064
1065	if (mp->mnt_opt != NULL)
1066		vfs_freeopts(mp->mnt_opt);
1067	mp->mnt_opt = mp->mnt_optnew;
1068	*optlist = NULL;
1069	(void)VFS_STATFS(mp, &mp->mnt_stat);
1070	/*
1071	 * Prevent external consumers of mount options from reading
1072	 * mnt_optnew.
1073	 */
1074	mp->mnt_optnew = NULL;
1075
1076	if ((mp->mnt_flag & MNT_RDONLY) == 0)
1077		vfs_allocate_syncvnode(mp);
1078	else
1079		vfs_deallocate_syncvnode(mp);
1080end:
1081	vfs_unbusy(mp);
1082	VI_LOCK(vp);
1083	vp->v_iflag &= ~VI_MOUNT;
1084	VI_UNLOCK(vp);
1085	vrele(vp);
1086	return (error != 0 ? error : export_error);
1087}
1088
1089/*
1090 * vfs_domount(): actually attempt a filesystem mount.
1091 */
1092static int
1093vfs_domount(
1094	struct thread *td,		/* Calling thread. */
1095	const char *fstype,		/* Filesystem type. */
1096	char *fspath,			/* Mount path. */
1097	uint64_t fsflags,		/* Flags common to all filesystems. */
1098	struct vfsoptlist **optlist	/* Options local to the filesystem. */
1099	)
1100{
1101	struct vfsconf *vfsp;
1102	struct nameidata nd;
1103	struct vnode *vp;
1104	char *pathbuf;
1105	int error;
1106
1107	/*
1108	 * Be ultra-paranoid about making sure the type and fspath
1109	 * variables will fit in our mp buffers, including the
1110	 * terminating NUL.
1111	 */
1112	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
1113		return (ENAMETOOLONG);
1114
1115	if (jailed(td->td_ucred) || usermount == 0) {
1116		if ((error = priv_check(td, PRIV_VFS_MOUNT)) != 0)
1117			return (error);
1118	}
1119
1120	/*
1121	 * Do not allow NFS export or MNT_SUIDDIR by unprivileged users.
1122	 */
1123	if (fsflags & MNT_EXPORTED) {
1124		error = priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
1125		if (error)
1126			return (error);
1127	}
1128	if (fsflags & MNT_SUIDDIR) {
1129		error = priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
1130		if (error)
1131			return (error);
1132	}
1133	/*
1134	 * Silently enforce MNT_NOSUID and MNT_USER for unprivileged users.
1135	 */
1136	if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
1137		if (priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
1138			fsflags |= MNT_NOSUID | MNT_USER;
1139	}
1140
1141	/* Load KLDs before we lock the covered vnode to avoid reversals. */
1142	vfsp = NULL;
1143	if ((fsflags & MNT_UPDATE) == 0) {
1144		/* Don't try to load KLDs if we're mounting the root. */
1145		if (fsflags & MNT_ROOTFS)
1146			vfsp = vfs_byname(fstype);
1147		else
1148			vfsp = vfs_byname_kld(fstype, td, &error);
1149		if (vfsp == NULL)
1150			return (ENODEV);
1151		if (jailed(td->td_ucred) && !(vfsp->vfc_flags & VFCF_JAIL))
1152			return (EPERM);
1153	}
1154
1155	/*
1156	 * Get vnode to be covered or mount point's vnode in case of MNT_UPDATE.
1157	 */
1158	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1159	    UIO_SYSSPACE, fspath, td);
1160	error = namei(&nd);
1161	if (error != 0)
1162		return (error);
1163	NDFREE(&nd, NDF_ONLY_PNBUF);
1164	vp = nd.ni_vp;
1165	if ((fsflags & MNT_UPDATE) == 0) {
1166		pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1167		strcpy(pathbuf, fspath);
1168		error = vn_path_to_global_path(td, vp, pathbuf, MNAMELEN);
1169		/* debug.disablefullpath == 1 results in ENODEV */
1170		if (error == 0 || error == ENODEV) {
1171			error = vfs_domount_first(td, vfsp, pathbuf, vp,
1172			    fsflags, optlist);
1173		}
1174		free(pathbuf, M_TEMP);
1175	} else
1176		error = vfs_domount_update(td, vp, fsflags, optlist);
1177
1178	return (error);
1179}
1180
1181/*
1182 * Unmount a filesystem.
1183 *
1184 * Note: unmount takes a path to the vnode mounted on as its argument,
1185 * not the special file (as before).
1186 */
1187#ifndef _SYS_SYSPROTO_H_
1188struct unmount_args {
1189	char	*path;
1190	int	flags;
1191};
1192#endif
1193/* ARGSUSED */
1194int
1195sys_unmount(struct thread *td, struct unmount_args *uap)
1196{
1197	struct nameidata nd;
1198	struct mount *mp;
1199	char *pathbuf;
1200	int error, id0, id1;
1201
1202	AUDIT_ARG_VALUE(uap->flags);
1203	if (jailed(td->td_ucred) || usermount == 0) {
1204		error = priv_check(td, PRIV_VFS_UNMOUNT);
1205		if (error)
1206			return (error);
1207	}
1208
1209	pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1210	error = copyinstr(uap->path, pathbuf, MNAMELEN, NULL);
1211	if (error) {
1212		free(pathbuf, M_TEMP);
1213		return (error);
1214	}
1215	if (uap->flags & MNT_BYFSID) {
1216		AUDIT_ARG_TEXT(pathbuf);
1217		/* Decode the filesystem ID. */
1218		if (sscanf(pathbuf, "FSID:%d:%d", &id0, &id1) != 2) {
1219			free(pathbuf, M_TEMP);
1220			return (EINVAL);
1221		}
1222
1223		mtx_lock(&mountlist_mtx);
1224		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1225			if (mp->mnt_stat.f_fsid.val[0] == id0 &&
1226			    mp->mnt_stat.f_fsid.val[1] == id1) {
1227				vfs_ref(mp);
1228				break;
1229			}
1230		}
1231		mtx_unlock(&mountlist_mtx);
1232	} else {
1233		/*
1234		 * Try to find global path for path argument.
1235		 */
1236		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1237		    UIO_SYSSPACE, pathbuf, td);
1238		if (namei(&nd) == 0) {
1239			NDFREE(&nd, NDF_ONLY_PNBUF);
1240			error = vn_path_to_global_path(td, nd.ni_vp, pathbuf,
1241			    MNAMELEN);
1242			if (error == 0 || error == ENODEV)
1243				vput(nd.ni_vp);
1244		}
1245		mtx_lock(&mountlist_mtx);
1246		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1247			if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
1248				vfs_ref(mp);
1249				break;
1250			}
1251		}
1252		mtx_unlock(&mountlist_mtx);
1253	}
1254	free(pathbuf, M_TEMP);
1255	if (mp == NULL) {
1256		/*
1257		 * Previously we returned ENOENT for a nonexistent path and
1258		 * EINVAL for a non-mountpoint.  We cannot tell these apart
1259		 * now, so in the !MNT_BYFSID case return the more likely
1260		 * EINVAL for compatibility.
1261		 */
1262		return ((uap->flags & MNT_BYFSID) ? ENOENT : EINVAL);
1263	}
1264
1265	/*
1266	 * Don't allow unmounting the root filesystem.
1267	 */
1268	if (mp->mnt_flag & MNT_ROOTFS) {
1269		vfs_rel(mp);
1270		return (EINVAL);
1271	}
1272	error = dounmount(mp, uap->flags, td);
1273	return (error);
1274}
1275
1276/*
1277 * Return an error if any of the vnodes, ignoring the root vnode
1278 * and the syncer vnode, has a non-zero use count.
1279 *
1280 * This function is purely advisory; it can return false positives
1281 * and false negatives.
1282 */
1283static int
1284vfs_check_usecounts(struct mount *mp)
1285{
1286	struct vnode *vp, *mvp;
1287
1288	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1289		if ((vp->v_vflag & VV_ROOT) == 0 && vp->v_type != VNON &&
1290		    vp->v_usecount != 0) {
1291			VI_UNLOCK(vp);
1292			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1293			return (EBUSY);
1294		}
1295		VI_UNLOCK(vp);
1296	}
1297
1298	return (0);
1299}
1300
1301static void
1302dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
1303{
1304
1305	mtx_assert(MNT_MTX(mp), MA_OWNED);
1306	mp->mnt_kern_flag &= ~mntkflags;
1307	if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
1308		mp->mnt_kern_flag &= ~MNTK_MWAIT;
1309		wakeup(mp);
1310	}
1311	MNT_IUNLOCK(mp);
1312	if (coveredvp != NULL) {
1313		VOP_UNLOCK(coveredvp, 0);
1314		vdrop(coveredvp);
1315	}
1316	vn_finished_write(mp);
1317}
1318
1319/*
1320 * Do the actual filesystem unmount.
1321 */
1322int
1323dounmount(struct mount *mp, int flags, struct thread *td)
1324{
1325	struct vnode *coveredvp, *fsrootvp;
1326	int error;
1327	uint64_t async_flag;
1328	int mnt_gen_r;
1329
1330	if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
1331		mnt_gen_r = mp->mnt_gen;
1332		VI_LOCK(coveredvp);
1333		vholdl(coveredvp);
1334		vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
1335		/*
1336		 * Check for mp being unmounted while waiting for the
1337		 * covered vnode lock.
1338		 */
1339		if (coveredvp->v_mountedhere != mp ||
1340		    coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
1341			VOP_UNLOCK(coveredvp, 0);
1342			vdrop(coveredvp);
1343			vfs_rel(mp);
1344			return (EBUSY);
1345		}
1346	}
1347
1348	/*
1349	 * Only privileged root, or (if MNT_USER is set) the user that did the
1350	 * original mount is permitted to unmount this filesystem.
1351	 */
1352	error = vfs_suser(mp, td);
1353	if (error != 0) {
1354		if (coveredvp != NULL) {
1355			VOP_UNLOCK(coveredvp, 0);
1356			vdrop(coveredvp);
1357		}
1358		vfs_rel(mp);
1359		return (error);
1360	}
1361
1362	vn_start_write(NULL, &mp, V_WAIT | V_MNTREF);
1363	MNT_ILOCK(mp);
1364	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
1365	    (mp->mnt_flag & MNT_UPDATE) != 0 ||
1366	    !TAILQ_EMPTY(&mp->mnt_uppers)) {
1367		dounmount_cleanup(mp, coveredvp, 0);
1368		return (EBUSY);
1369	}
1370	mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_NOINSMNTQ;
1371	if (flags & MNT_NONBUSY) {
1372		MNT_IUNLOCK(mp);
1373		error = vfs_check_usecounts(mp);
1374		MNT_ILOCK(mp);
1375		if (error != 0) {
1376			dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT |
1377			    MNTK_NOINSMNTQ);
1378			return (error);
1379		}
1380	}
1381	/* Allow filesystems to detect that a forced unmount is in progress. */
1382	if (flags & MNT_FORCE) {
1383		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
1384		MNT_IUNLOCK(mp);
1385		/*
1386		 * Must be done after setting MNTK_UNMOUNTF and before
1387		 * waiting for mnt_lockref to become 0.
1388		 */
1389		VFS_PURGE(mp);
1390		MNT_ILOCK(mp);
1391	}
1392	error = 0;
1393	if (mp->mnt_lockref) {
1394		mp->mnt_kern_flag |= MNTK_DRAINING;
1395		error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
1396		    "mount drain", 0);
1397	}
1398	MNT_IUNLOCK(mp);
1399	KASSERT(mp->mnt_lockref == 0,
1400	    ("%s: invalid lock refcount in the drain path @ %s:%d",
1401	    __func__, __FILE__, __LINE__));
1402	KASSERT(error == 0,
1403	    ("%s: invalid return value for msleep in the drain path @ %s:%d",
1404	    __func__, __FILE__, __LINE__));
1405
1406	if (mp->mnt_flag & MNT_EXPUBLIC)
1407		vfs_setpublicfs(NULL, NULL, NULL);
1408
1409	/*
1410	 * From now on, we can claim that the use reference on the
1411	 * coveredvp is ours, and the ref can be released only by a
1412	 * successful unmount by us, or left for a later unmount
1413	 * attempt.  The previously acquired hold reference is no
1414	 * longer needed to protect the vnode from reuse.
1415	 */
1416	if (coveredvp != NULL)
1417		vdrop(coveredvp);
1418
1419	vfs_msync(mp, MNT_WAIT);
1420	MNT_ILOCK(mp);
1421	async_flag = mp->mnt_flag & MNT_ASYNC;
1422	mp->mnt_flag &= ~MNT_ASYNC;
1423	mp->mnt_kern_flag &= ~MNTK_ASYNC;
1424	MNT_IUNLOCK(mp);
1425	cache_purgevfs(mp, false); /* remove cache entries for this file sys */
1426	vfs_deallocate_syncvnode(mp);
1427	/*
1428	 * For forced unmounts, move process cdir/rdir refs on the fs root
1429	 * vnode to the covered vnode.  For non-forced unmounts we want
1430	 * such references to cause an EBUSY error.
1431	 */
1432	if ((flags & MNT_FORCE) &&
1433	    VFS_ROOT(mp, LK_EXCLUSIVE, &fsrootvp) == 0) {
1434		if (mp->mnt_vnodecovered != NULL &&
1435		    (mp->mnt_flag & MNT_IGNORE) == 0)
1436			mountcheckdirs(fsrootvp, mp->mnt_vnodecovered);
1437		if (fsrootvp == rootvnode) {
1438			vrele(rootvnode);
1439			rootvnode = NULL;
1440		}
1441		vput(fsrootvp);
1442	}
1443	error = VFS_UNMOUNT(mp, flags);
1444	vn_finished_write(mp);
1445	/*
1446	 * If we failed to flush the dirty blocks for this mount point,
1447	 * undo all the cdir/rdir and rootvnode changes we made above,
1448	 * unless we failed because the device reports that it no
1449	 * longer exists.
1450	 */
1451	if (error && error != ENXIO) {
1452		if ((flags & MNT_FORCE) &&
1453		    VFS_ROOT(mp, LK_EXCLUSIVE, &fsrootvp) == 0) {
1454			if (mp->mnt_vnodecovered != NULL &&
1455			    (mp->mnt_flag & MNT_IGNORE) == 0)
1456				mountcheckdirs(mp->mnt_vnodecovered, fsrootvp);
1457			if (rootvnode == NULL) {
1458				rootvnode = fsrootvp;
1459				vref(rootvnode);
1460			}
1461			vput(fsrootvp);
1462		}
1463		MNT_ILOCK(mp);
1464		mp->mnt_kern_flag &= ~MNTK_NOINSMNTQ;
1465		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
1466			MNT_IUNLOCK(mp);
1467			vfs_allocate_syncvnode(mp);
1468			MNT_ILOCK(mp);
1469		}
1470		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
1471		mp->mnt_flag |= async_flag;
1472		if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1473		    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1474			mp->mnt_kern_flag |= MNTK_ASYNC;
1475		if (mp->mnt_kern_flag & MNTK_MWAIT) {
1476			mp->mnt_kern_flag &= ~MNTK_MWAIT;
1477			wakeup(mp);
1478		}
1479		MNT_IUNLOCK(mp);
1480		if (coveredvp)
1481			VOP_UNLOCK(coveredvp, 0);
1482		return (error);
1483	}
1484	mtx_lock(&mountlist_mtx);
1485	TAILQ_REMOVE(&mountlist, mp, mnt_list);
1486	mtx_unlock(&mountlist_mtx);
1487	EVENTHANDLER_INVOKE(vfs_unmounted, mp, td);
1488	if (coveredvp != NULL) {
1489		coveredvp->v_mountedhere = NULL;
1490		VOP_UNLOCK(coveredvp, 0);
1491	}
1492	vfs_event_signal(NULL, VQ_UNMOUNT, 0);
1493	if (mp == rootdevmp)
1494		rootdevmp = NULL;
1495	vfs_mount_destroy(mp);
1496	return (0);
1497}
1498
1499/*
1500 * Report errors during filesystem mounting.
1501 */
1502void
1503vfs_mount_error(struct mount *mp, const char *fmt, ...)
1504{
1505	struct vfsoptlist *moptlist = mp->mnt_optnew;
1506	va_list ap;
1507	int error, len;
1508	char *errmsg;
1509
1510	error = vfs_getopt(moptlist, "errmsg", (void **)&errmsg, &len);
1511	if (error || errmsg == NULL || len <= 0)
1512		return;
1513
1514	va_start(ap, fmt);
1515	vsnprintf(errmsg, (size_t)len, fmt, ap);
1516	va_end(ap);
1517}
1518
1519void
1520vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...)
1521{
1522	va_list ap;
1523	int error, len;
1524	char *errmsg;
1525
1526	error = vfs_getopt(opts, "errmsg", (void **)&errmsg, &len);
1527	if (error || errmsg == NULL || len <= 0)
1528		return;
1529
1530	va_start(ap, fmt);
1531	vsnprintf(errmsg, (size_t)len, fmt, ap);
1532	va_end(ap);
1533}
1534
1535/*
1536 * ---------------------------------------------------------------------
1537 * Functions for querying mount options/arguments from filesystems.
1538 */
1539
1540/*
1541 * Check that no unknown options are given
1542 */
1543int
1544vfs_filteropt(struct vfsoptlist *opts, const char **legal)
1545{
1546	struct vfsopt *opt;
1547	char errmsg[255];
1548	const char **t, *p, *q;
1549	int ret = 0;
1550
1551	TAILQ_FOREACH(opt, opts, link) {
1552		p = opt->name;
1553		q = NULL;
1554		if (p[0] == 'n' && p[1] == 'o')
1555			q = p + 2;
1556		for(t = global_opts; *t != NULL; t++) {
1557			if (strcmp(*t, p) == 0)
1558				break;
1559			if (q != NULL) {
1560				if (strcmp(*t, q) == 0)
1561					break;
1562			}
1563		}
1564		if (*t != NULL)
1565			continue;
1566		for(t = legal; *t != NULL; t++) {
1567			if (strcmp(*t, p) == 0)
1568				break;
1569			if (q != NULL) {
1570				if (strcmp(*t, q) == 0)
1571					break;
1572			}
1573		}
1574		if (*t != NULL)
1575			continue;
1576		snprintf(errmsg, sizeof(errmsg),
1577		    "mount option <%s> is unknown", p);
1578		ret = EINVAL;
1579	}
1580	if (ret != 0) {
1581		TAILQ_FOREACH(opt, opts, link) {
1582			if (strcmp(opt->name, "errmsg") == 0) {
1583				strncpy((char *)opt->value, errmsg, opt->len);
1584				break;
1585			}
1586		}
1587		if (opt == NULL)
1588			printf("%s\n", errmsg);
1589	}
1590	return (ret);
1591}
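
/*
 * Illustrative sketch (the option list name is assumed, not from this file):
 * a filesystem's VFS_MOUNT implementation typically declares the options it
 * understands and rejects everything else up front:
 *
 *	static const char *myfs_opts[] = {
 *		"from", "export", "noatime", NULL
 *	};
 *
 *	if (vfs_filteropt(mp->mnt_optnew, myfs_opts))
 *		return (EINVAL);
 *
 * The global options above ("fstype", "fspath", ...) always pass the filter,
 * as does the "no"-prefixed form of any legal option.
 */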
1592
1593/*
1594 * Get a mount option by its name.
1595 *
1596 * Return 0 if the option was found, ENOENT otherwise.
1597 * If len is non-NULL it will be filled with the length
1598 * of the option. If buf is non-NULL, it will be filled
1599 * with the address of the option.
1600 */
1601int
1602vfs_getopt(struct vfsoptlist *opts, const char *name, void **buf, int *len)
1603{
1604	struct vfsopt *opt;
1605
1606	KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
1607
1608	TAILQ_FOREACH(opt, opts, link) {
1609		if (strcmp(name, opt->name) == 0) {
1610			opt->seen = 1;
1611			if (len != NULL)
1612				*len = opt->len;
1613			if (buf != NULL)
1614				*buf = opt->value;
1615			return (0);
1616		}
1617	}
1618	return (ENOENT);
1619}
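
/*
 * Illustrative sketch (the option name is only an example): callers retrieve
 * raw option data with vfs_getopt(); the returned buffer belongs to the
 * option list and must not be freed by the caller:
 *
 *	char *from;
 *	int len;
 *
 *	if (vfs_getopt(mp->mnt_optnew, "from", (void **)&from, &len) == 0 &&
 *	    len > 0 && from[len - 1] == '\0')
 *		printf("mounting from %s\n", from);
 */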
1620
1621int
1622vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
1623{
1624	struct vfsopt *opt;
1625
1626	if (opts == NULL)
1627		return (-1);
1628
1629	TAILQ_FOREACH(opt, opts, link) {
1630		if (strcmp(name, opt->name) == 0) {
1631			opt->seen = 1;
1632			return (opt->pos);
1633		}
1634	}
1635	return (-1);
1636}
1637
1638int
1639vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value)
1640{
1641	char *opt_value, *vtp;
1642	quad_t iv;
1643	int error, opt_len;
1644
1645	error = vfs_getopt(opts, name, (void **)&opt_value, &opt_len);
1646	if (error != 0)
1647		return (error);
1648	if (opt_len == 0 || opt_value == NULL)
1649		return (EINVAL);
1650	if (opt_value[0] == '\0' || opt_value[opt_len - 1] != '\0')
1651		return (EINVAL);
1652	iv = strtoq(opt_value, &vtp, 0);
1653	if (vtp == opt_value || (vtp[0] != '\0' && vtp[1] != '\0'))
1654		return (EINVAL);
1655	if (iv < 0)
1656		return (EINVAL);
1657	switch (vtp[0]) {
1658	case 't':
1659	case 'T':
1660		iv *= 1024;	/* FALLTHROUGH */
1661	case 'g':
1662	case 'G':
1663		iv *= 1024;	/* FALLTHROUGH */
1664	case 'm':
1665	case 'M':
1666		iv *= 1024;	/* FALLTHROUGH */
1667	case 'k':
1668	case 'K':
1669		iv *= 1024;	/* FALLTHROUGH */
1670	case '\0':
1671		break;
1672	default:
1673		return (EINVAL);
1674	}
1675	*value = iv;
1676
1677	return (0);
1678}
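
/*
 * Illustrative note (not from the original source): the cascading cases
 * above multiply once per suffix level, so "16k" yields 16384, "2M" yields
 * 2097152 and "1g" yields 1073741824.  A bare number such as "512" is used
 * as-is; any other trailing character, or more than one character after the
 * digits, is rejected with EINVAL.
 */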
1679
1680char *
1681vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
1682{
1683	struct vfsopt *opt;
1684
1685	*error = 0;
1686	TAILQ_FOREACH(opt, opts, link) {
1687		if (strcmp(name, opt->name) != 0)
1688			continue;
1689		opt->seen = 1;
1690		if (opt->len == 0 ||
1691		    ((char *)opt->value)[opt->len - 1] != '\0') {
1692			*error = EINVAL;
1693			return (NULL);
1694		}
1695		return (opt->value);
1696	}
1697	*error = ENOENT;
1698	return (NULL);
1699}
1700
1701int
1702vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w,
1703	uint64_t val)
1704{
1705	struct vfsopt *opt;
1706
1707	TAILQ_FOREACH(opt, opts, link) {
1708		if (strcmp(name, opt->name) == 0) {
1709			opt->seen = 1;
1710			if (w != NULL)
1711				*w |= val;
1712			return (1);
1713		}
1714	}
1715	if (w != NULL)
1716		*w &= ~val;
1717	return (0);
1718}
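
/*
 * Illustrative sketch (the option/flag pairing is only an example):
 * vfs_flagopt() tests for an option by name, sets or clears the given bit
 * in *w accordingly, and returns whether the option was present:
 *
 *	vfs_flagopt(mp->mnt_optnew, "union", &mp->mnt_flag, MNT_UNION);
 */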
1719
1720int
1721vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
1722{
1723	va_list ap;
1724	struct vfsopt *opt;
1725	int ret;
1726
1727	KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
1728
1729	TAILQ_FOREACH(opt, opts, link) {
1730		if (strcmp(name, opt->name) != 0)
1731			continue;
1732		opt->seen = 1;
1733		if (opt->len == 0 || opt->value == NULL)
1734			return (0);
1735		if (((char *)opt->value)[opt->len - 1] != '\0')
1736			return (0);
1737		va_start(ap, fmt);
1738		ret = vsscanf(opt->value, fmt, ap);
1739		va_end(ap);
1740		return (ret);
1741	}
1742	return (0);
1743}
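
/*
 * Illustrative sketch (the option name is only an example): a filesystem can
 * parse a numeric option with vfs_scanopt(), which returns the vsscanf()
 * conversion count:
 *
 *	int timeout;
 *
 *	if (vfs_scanopt(mp->mnt_optnew, "timeout", "%d", &timeout) != 1)
 *		timeout = 30;
 */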
1744
1745int
1746vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
1747{
1748	struct vfsopt *opt;
1749
1750	TAILQ_FOREACH(opt, opts, link) {
1751		if (strcmp(name, opt->name) != 0)
1752			continue;
1753		opt->seen = 1;
1754		if (opt->value == NULL)
1755			opt->len = len;
1756		else {
1757			if (opt->len != len)
1758				return (EINVAL);
1759			bcopy(value, opt->value, len);
1760		}
1761		return (0);
1762	}
1763	return (ENOENT);
1764}
1765
1766int
1767vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
1768{
1769	struct vfsopt *opt;
1770
1771	TAILQ_FOREACH(opt, opts, link) {
1772		if (strcmp(name, opt->name) != 0)
1773			continue;
1774		opt->seen = 1;
1775		if (opt->value == NULL)
1776			opt->len = len;
1777		else {
1778			if (opt->len < len)
1779				return (EINVAL);
1780			opt->len = len;
1781			bcopy(value, opt->value, len);
1782		}
1783		return (0);
1784	}
1785	return (ENOENT);
1786}
1787
1788int
1789vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
1790{
1791	struct vfsopt *opt;
1792
1793	TAILQ_FOREACH(opt, opts, link) {
1794		if (strcmp(name, opt->name) != 0)
1795			continue;
1796		opt->seen = 1;
1797		if (opt->value == NULL)
1798			opt->len = strlen(value) + 1;
1799		else if (strlcpy(opt->value, value, opt->len) >= opt->len)
1800			return (EINVAL);
1801		return (0);
1802	}
1803	return (ENOENT);
1804}
1805
1806/*
1807 * Find and copy a mount option.
1808 *
1809 * The size of the buffer has to be specified
1810 * in len; if it is not the same length as the
1811 * mount option, EINVAL is returned.
1812 * Returns ENOENT if the option is not found.
1813 */
1814int
1815vfs_copyopt(struct vfsoptlist *opts, const char *name, void *dest, int len)
1816{
1817	struct vfsopt *opt;
1818
1819	KASSERT(opts != NULL, ("vfs_copyopt: caller passed 'opts' as NULL"));
1820
1821	TAILQ_FOREACH(opt, opts, link) {
1822		if (strcmp(name, opt->name) == 0) {
1823			opt->seen = 1;
1824			if (len != opt->len)
1825				return (EINVAL);
1826			bcopy(opt->value, dest, opt->len);
1827			return (0);
1828		}
1829	}
1830	return (ENOENT);
1831}
1832
1833int
1834__vfs_statfs(struct mount *mp, struct statfs *sbp)
1835{
1836	int error;
1837
1838	error = mp->mnt_op->vfs_statfs(mp, &mp->mnt_stat);
1839	if (sbp != &mp->mnt_stat)
1840		*sbp = mp->mnt_stat;
1841	return (error);
1842}
1843
1844void
1845vfs_mountedfrom(struct mount *mp, const char *from)
1846{
1847
1848	bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
1849	strlcpy(mp->mnt_stat.f_mntfromname, from,
1850	    sizeof mp->mnt_stat.f_mntfromname);
1851}
1852
1853/*
1854 * ---------------------------------------------------------------------
1855 * This is the API for building mount arguments and mounting filesystems
1856 * from inside the kernel.
1857 *
1858 * The API works by accumulating individual arguments.  The first error
1859 * is latched and later returned by kernel_mount().
1860 *
1861 * XXX: should be documented in a new kernel_mount(9) manual page
1862 */
1863
1864/* A memory allocation which must be freed when we are done */
1865struct mntaarg {
1866	SLIST_ENTRY(mntaarg)	next;
1867};
1868
1869/* The header for the mount arguments */
1870struct mntarg {
1871	struct iovec *v;
1872	int len;
1873	int error;
1874	SLIST_HEAD(, mntaarg)	list;
1875};
1876
1877/*
1878 * Add a boolean argument.
1879 *
1880 * flag is the boolean value.
1881 * name must start with "no".
1882 */
1883struct mntarg *
1884mount_argb(struct mntarg *ma, int flag, const char *name)
1885{
1886
1887	KASSERT(name[0] == 'n' && name[1] == 'o',
1888	    ("mount_argb(...,%s): name must start with 'no'", name));
1889
1890	return (mount_arg(ma, name + (flag ? 2 : 0), NULL, 0));
1891}
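
/*
 * Illustrative note (not from the original source): mount_argb() records the
 * option under its bare name when flag is true and under the "no" name when
 * flag is false, so mount_argb(ma, flags & MNT_RDONLY, "noro") adds "ro" for
 * a read-only request and "noro" otherwise, as used in sys_mount() above.
 */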
1892
1893/*
1894 * Add an argument, printf style.
1895 */
1896struct mntarg *
1897mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...)
1898{
1899	va_list ap;
1900	struct mntaarg *maa;
1901	struct sbuf *sb;
1902	int len;
1903
1904	if (ma == NULL) {
1905		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1906		SLIST_INIT(&ma->list);
1907	}
1908	if (ma->error)
1909		return (ma);
1910
1911	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
1912	    M_MOUNT, M_WAITOK);
1913	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
1914	ma->v[ma->len].iov_len = strlen(name) + 1;
1915	ma->len++;
1916
1917	sb = sbuf_new_auto();
1918	va_start(ap, fmt);
1919	sbuf_vprintf(sb, fmt, ap);
1920	va_end(ap);
1921	sbuf_finish(sb);
1922	len = sbuf_len(sb) + 1;
1923	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
1924	SLIST_INSERT_HEAD(&ma->list, maa, next);
1925	bcopy(sbuf_data(sb), maa + 1, len);
1926	sbuf_delete(sb);
1927
1928	ma->v[ma->len].iov_base = maa + 1;
1929	ma->v[ma->len].iov_len = len;
1930	ma->len++;
1931
1932	return (ma);
1933}
1934
1935/*
1936 * Add an argument which is a userland string.
1937 */
1938struct mntarg *
1939mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
1940{
1941	struct mntaarg *maa;
1942	char *tbuf;
1943
1944	if (val == NULL)
1945		return (ma);
1946	if (ma == NULL) {
1947		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1948		SLIST_INIT(&ma->list);
1949	}
1950	if (ma->error)
1951		return (ma);
1952	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
1953	SLIST_INSERT_HEAD(&ma->list, maa, next);
1954	tbuf = (void *)(maa + 1);
1955	ma->error = copyinstr(val, tbuf, len, NULL);
1956	return (mount_arg(ma, name, tbuf, -1));
1957}
1958
1959/*
1960 * Plain argument.
1961 *
1962 * If length is -1, treat value as a C string.
1963 */
1964struct mntarg *
1965mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
1966{
1967
1968	if (ma == NULL) {
1969		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1970		SLIST_INIT(&ma->list);
1971	}
1972	if (ma->error)
1973		return (ma);
1974
1975	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
1976	    M_MOUNT, M_WAITOK);
1977	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
1978	ma->v[ma->len].iov_len = strlen(name) + 1;
1979	ma->len++;
1980
1981	ma->v[ma->len].iov_base = (void *)(uintptr_t)val;
1982	if (len < 0)
1983		ma->v[ma->len].iov_len = strlen(val) + 1;
1984	else
1985		ma->v[ma->len].iov_len = len;
1986	ma->len++;
1987	return (ma);
1988}
1989
1990/*
1991 * Free a mntarg structure
1992 */
1993static void
1994free_mntarg(struct mntarg *ma)
1995{
1996	struct mntaarg *maa;
1997
1998	while (!SLIST_EMPTY(&ma->list)) {
1999		maa = SLIST_FIRST(&ma->list);
2000		SLIST_REMOVE_HEAD(&ma->list, next);
2001		free(maa, M_MOUNT);
2002	}
2003	free(ma->v, M_MOUNT);
2004	free(ma, M_MOUNT);
2005}
2006
2007/*
2008 * Mount a filesystem
2009 */
2010int
2011kernel_mount(struct mntarg *ma, uint64_t flags)
2012{
2013	struct uio auio;
2014	int error;
2015
2016	KASSERT(ma != NULL, ("kernel_mount NULL ma"));
2017	KASSERT(ma->v != NULL, ("kernel_mount NULL ma->v"));
2018	KASSERT(!(ma->len & 1), ("kernel_mount odd ma->len (%d)", ma->len));
2019
2020	auio.uio_iov = ma->v;
2021	auio.uio_iovcnt = ma->len;
2022	auio.uio_segflg = UIO_SYSSPACE;
2023
2024	error = ma->error;
2025	if (!error)
2026		error = vfs_donmount(curthread, flags, &auio);
2027	free_mntarg(ma);
2028	return (error);
2029}
2030
2031/*
2032 * A printflike function to mount a filesystem.
2033 */
2034int
2035kernel_vmount(int flags, ...)
2036{
2037	struct mntarg *ma = NULL;
2038	va_list ap;
2039	const char *cp;
2040	const void *vp;
2041	int error;
2042
2043	va_start(ap, flags);
2044	for (;;) {
2045		cp = va_arg(ap, const char *);
2046		if (cp == NULL)
2047			break;
2048		vp = va_arg(ap, const void *);
2049		ma = mount_arg(ma, cp, vp, (vp != NULL ? -1 : 0));
2050	}
2051	va_end(ap);
2052
2053	error = kernel_mount(ma, flags);
2054	return (error);
2055}
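
/*
 * Illustrative sketch (filesystem, device and mount point are hypothetical):
 * an in-kernel caller can mount a filesystem with the varargs form, passing
 * name/value string pairs terminated by a NULL name:
 *
 *	error = kernel_vmount(MNT_RDONLY,
 *	    "fstype", "cd9660",
 *	    "fspath", "/mnt",
 *	    "from", "/dev/cd0",
 *	    NULL);
 *
 * The accumulation form builds a struct mntarg explicitly and hands it to
 * kernel_mount(), which frees it and returns the first latched error:
 *
 *	struct mntarg *ma = NULL;
 *
 *	ma = mount_arg(ma, "fstype", "cd9660", -1);
 *	ma = mount_arg(ma, "fspath", "/mnt", -1);
 *	ma = mount_arg(ma, "from", "/dev/cd0", -1);
 *	error = kernel_mount(ma, MNT_RDONLY);
 */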
2056
2057/*
2058 * Convert the old export args format into new export args.
2059 *
2060 * The old export args struct does not have security flavors.  Otherwise, the
2061 * structs are identical.  The default security flavor 'sys' is applied by
2062 * vfs_export when .ex_numsecflavors is 0.
2063 */
2064void
2065vfs_oexport_conv(const struct oexport_args *oexp, struct export_args *exp)
2066{
2067
2068	bcopy(oexp, exp, sizeof(*oexp));
2069	exp->ex_numsecflavors = 0;
2070}
2071