/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/asan.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/umtxvar.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exec;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
    &coredump_pack_fileinfo, 0,
    "Enable file path packing in 'procstat -f' coredump notes");

int coredump_pack_vmmapinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
    &coredump_pack_vmmapinfo, 0,
    "Enable file path packing in 'procstat -v' coredump notes");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
    struct mac *mac_p, struct vmspace *oldvmspace);

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
    "Location of process' ps_strings structure");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
    "Top of process stack");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_stackprot, "I",
    "Stack memory permissions");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0,
    "Process' command line characters cache limit");

static int disallow_high_osrel;
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
    &disallow_high_osrel, 0,
146    "Disallow execution of binaries built for higher version of the world");

static int map_at_zero = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
    "Permit processes to map an object at virtual address 0.");

static int core_dump_can_intr = 1;
SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
    &core_dump_can_intr, 0,
    "Core dumping interruptible with SIGKILL");

static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t ps_strings;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val;
		val = (unsigned int)PROC_PS_STRINGS(p);
		return (SYSCTL_OUT(req, &val, sizeof(val)));
	}
#endif
	ps_strings = PROC_PS_STRINGS(p);
	return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t val;

	p = curproc;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		unsigned int val32;

		val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
		return (SYSCTL_OUT(req, &val32, sizeof(val32)));
	}
#endif
	val = round_page(p->p_vmspace->vm_stacktop);
	return (SYSCTL_OUT(req, &val, sizeof(val)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;

	p = curproc;
	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
	    sizeof(p->p_sysent->sv_stackprot)));
}

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char    *fname;
	char    **argv;
	char    **envv;
};
#endif

int
sys_execve(struct thread *td, struct execve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, NULL, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
	int	fd;
	char	**argv;
	char	**envv;
};
#endif
int
sys_fexecve(struct thread *td, struct fexecve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
	    uap->argv, uap->envv);
	if (error == 0) {
		args.fd = uap->fd;
		error = kern_execve(td, &args, NULL, oldvmspace);
	}
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
	char	*fname;
	char	**argv;
	char	**envv;
	struct mac	*mac_p;
};
#endif

int
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
{
#ifdef MAC
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, uap->mac_p, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
#else
	return (ENOSYS);
#endif
}

int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
	struct proc *p;
	int error;

	KASSERT(td == curthread, ("non-current thread %p", td));
	error = 0;
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		if (thread_single(p, SINGLE_BOUNDARY) != 0)
			error = ERESTART;
		PROC_UNLOCK(p);
	}
	KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
	    ("nested execve"));
	*oldvmspace = p->p_vmspace;
	return (error);
}

void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
	struct proc *p;

	KASSERT(td == curthread, ("non-current thread %p", td));
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/*
		 * On success, we upgrade to SINGLE_EXIT state to
		 * force the other threads to exit.
		 */
		if (error == EJUSTRETURN)
			thread_single(p, SINGLE_EXIT);
		else
			thread_single_end(p, SINGLE_BOUNDARY);
		PROC_UNLOCK(p);
	}
	exec_cleanup(td, oldvmspace);
}

/*
 * kern_execve() has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{

	TSEXEC(td->td_proc->p_pid, args->begin_argv);
	AUDIT_ARG_ARGV(args->begin_argv, args->argc,
	    exec_args_get_begin_envv(args) - args->begin_argv);
	AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
	    args->endp - exec_args_get_begin_envv(args));

	/* Must have at least one argument. */
	if (args->argc == 0) {
		exec_free_args(args);
		return (EINVAL);
	}
	return (do_execve(td, args, mac_p, oldvmspace));
}

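/*
 * Abandon a pending set-id transition: clear the setid flag and drop
 * the partially constructed credentials, if any.
 */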
static void
execve_nosetid(struct image_params *imgp)
{
	imgp->credential_setid = false;
	if (imgp->newcred != NULL) {
		crfree(imgp->newcred);
		imgp->newcred = NULL;
	}
}

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 */
static int
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{
	struct proc *p = td->td_proc;
	struct nameidata nd;
	struct ucred *oldcred;
	struct uidinfo *euip = NULL;
	uintptr_t stack_base;
	struct image_params image_params, *imgp;
	struct vattr attr;
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
#ifdef KTRACE
	struct ktr_io_params *kiop;
#endif
	struct vnode *oldtextvp, *newtextvp;
	struct vnode *oldtextdvp, *newtextdvp;
	char *oldbinname, *newbinname;
	bool credential_changing;
#ifdef MAC
	struct label *interpvplabel = NULL;
	bool will_transition;
#endif
#ifdef HWPMC_HOOKS
	struct pmckern_procexec pe;
#endif
	int error, i, orig_osrel;
	uint32_t orig_fctl0;
	Elf_Brandinfo *orig_brandinfo;
	size_t freepath_size;
	static const char fexecv_proc_title[] = "(fexecv)";

	imgp = &image_params;
	oldtextvp = oldtextdvp = NULL;
	newtextvp = newtextdvp = NULL;
	newbinname = oldbinname = NULL;
#ifdef KTRACE
	kiop = NULL;
#endif

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/*
	 * Initialize part of the common data
	 */
	bzero(imgp, sizeof(*imgp));
	imgp->proc = p;
	imgp->attr = &attr;
	imgp->args = args;
	oldcred = p->p_ucred;
	orig_osrel = p->p_osrel;
	orig_fctl0 = p->p_fctl0;
	orig_brandinfo = p->p_elf_brandinfo;

#ifdef MAC
	error = mac_execve_enter(imgp, mac_p);
	if (error)
		goto exec_fail;
#endif

	SDT_PROBE1(proc, , , exec, args->fname);

interpret:
	if (args->fname != NULL) {
#ifdef CAPABILITY_MODE
		if (CAP_TRACING(td))
			ktrcapfail(CAPFAIL_NAMEI, args->fname);
		/*
		 * While capability mode can't reach this point via direct
		 * path arguments to execve(), we also don't allow
		 * interpreters to be used in capability mode (for now).
		 * Catch indirect lookups and return a permissions error.
		 */
		if (IN_CAPABILITY_MODE(td)) {
			error = ECAPMODE;
			goto exec_fail;
		}
#endif

		/*
		 * Translate the file name. namei() returns a vnode
		 * pointer in ni_vp among other things.
		 */
		NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
		    AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
		    args->fname);

		error = namei(&nd);
		if (error)
			goto exec_fail;

		newtextvp = nd.ni_vp;
		newtextdvp = nd.ni_dvp;
		nd.ni_dvp = NULL;
		newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
		    M_WAITOK);
		memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
		newbinname[nd.ni_cnd.cn_namelen] = '\0';
		imgp->vp = newtextvp;

		/*
		 * Do our best to compute the full path to the image file.
		 */
		if (args->fname[0] == '/') {
			imgp->execpath = args->fname;
		} else {
			VOP_UNLOCK(imgp->vp);
			freepath_size = MAXPATHLEN;
			if (vn_fullpath_hardlink(newtextvp, newtextdvp,
			    newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
			    &imgp->freepath, &freepath_size) != 0)
				imgp->execpath = args->fname;
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
	} else if (imgp->interpreter_vp) {
		/*
		 * An image activator has already provided an open vnode
		 */
		newtextvp = imgp->interpreter_vp;
		imgp->interpreter_vp = NULL;
		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	} else {
		AUDIT_ARG_FD(args->fd);

		/*
		 * If the descriptor was not opened with O_PATH, then
		 * we require that it was opened with O_EXEC or
		 * O_RDONLY.  In either case, exec_check_permissions()
		 * below checks the _current_ file access mode regardless
		 * of the permissions additionally checked at
		 * open(2).
		 */
		error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
		    &newtextvp);
		if (error != 0)
			goto exec_fail;

		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	}

	/*
	 * Check file permissions.  Also 'opens' file and sets its vnode to
	 * text mode.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->object = imgp->vp->v_object;
	if (imgp->object != NULL)
		vm_object_reference(imgp->object);

	error = exec_map_first_page(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->proc->p_osrel = 0;
	imgp->proc->p_fctl0 = 0;
	imgp->proc->p_elf_brandinfo = NULL;

	/*
	 * Implement image setuid/setgid.
	 *
	 * Determine new credentials before attempting image activators
	 * so that it can be used by process_exec handlers to determine
	 * credential/setid changes.
	 *
	 * Don't honor setuid/setgid if the filesystem prohibits it or if
	 * the process is being traced.
	 *
	 * We disable setuid/setgid/etc in capability mode on the basis
	 * that most setugid applications are not written with that
	 * environment in mind, and will therefore almost certainly operate
	 * incorrectly. In principle there's no reason that setugid
	 * applications might not be useful in capability mode, so we may want
	 * to reconsider this conservative design choice in the future.
	 *
	 * XXXMAC: For the time being, use NOSUID to also prohibit
	 * transitions on the file system.
	 */
	credential_changing = false;
	credential_changing |= (attr.va_mode & S_ISUID) &&
	    oldcred->cr_uid != attr.va_uid;
	credential_changing |= (attr.va_mode & S_ISGID) &&
	    oldcred->cr_gid != attr.va_gid;
#ifdef MAC
	will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
	    interpvplabel, imgp) != 0;
	credential_changing |= will_transition;
#endif

	/* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
	if (credential_changing)
		imgp->proc->p_pdeathsig = 0;

	if (credential_changing &&
#ifdef CAPABILITY_MODE
	    ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
#endif
	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
	    (p->p_flag & P_TRACED) == 0) {
		imgp->credential_setid = true;
		VOP_UNLOCK(imgp->vp);
		imgp->newcred = crdup(oldcred);
		if (attr.va_mode & S_ISUID) {
			euip = uifind(attr.va_uid);
			change_euid(imgp->newcred, euip);
		}
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (attr.va_mode & S_ISGID)
			change_egid(imgp->newcred, attr.va_gid);
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXXMAC: Note that the current logic will save the
		 * uid and gid if a MAC domain transition occurs, even
		 * though maybe it shouldn't.
		 */
		change_svuid(imgp->newcred, imgp->newcred->cr_uid);
		change_svgid(imgp->newcred, imgp->newcred->cr_gid);
	} else {
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXX: It's not clear that the existing behavior is
		 * POSIX-compliant.  A number of sources indicate that the
		 * saved uid/gid should only be updated if the new ruid is
		 * not equal to the old ruid, or the new euid is not equal
		 * to the old euid and the new euid is not equal to the old
		 * ruid.  The FreeBSD code always updates the saved uid/gid.
		 * Also, this code uses the new (replaced) euid and egid as
		 * the source, which may or may not be the right ones to use.
		 */
		if (oldcred->cr_svuid != oldcred->cr_uid ||
		    oldcred->cr_svgid != oldcred->cr_gid) {
			VOP_UNLOCK(imgp->vp);
			imgp->newcred = crdup(oldcred);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
			change_svuid(imgp->newcred, imgp->newcred->cr_uid);
			change_svgid(imgp->newcred, imgp->newcred->cr_gid);
		}
	}
	/* The new credentials are installed into the process later. */

	/*
	 *	Loop through the list of image activators, calling each one.
	 *	An activator returns -1 if there is no match, 0 on success,
	 *	and an error otherwise.
	 */
	error = -1;
	for (i = 0; error == -1 && execsw[i]; ++i) {
		if (execsw[i]->ex_imgact == NULL)
			continue;
		error = (*execsw[i]->ex_imgact)(imgp);
	}

	if (error) {
		if (error == -1)
			error = ENOEXEC;
		goto exec_fail_dealloc;
	}

	/*
	 * Special interpreter operation: clean up and loop back to try to
	 * activate the interpreter.
	 */
	if (imgp->interpreted) {
		exec_unmap_first_page(imgp);
		/*
		 * The text reference needs to be removed for scripts.
		 * There is a short period, before we determine that
		 * something is a script, during which the text reference
		 * is active.
		 * The vnode lock is held over this entire period
		 * so nothing should illegitimately be blocked.
		 */
		MPASS(imgp->textset);
		VOP_UNSET_TEXT_CHECKED(newtextvp);
		imgp->textset = false;
		/* free name buffer and old vnode */
#ifdef MAC
		mac_execve_interpreter_enter(newtextvp, &interpvplabel);
#endif
		if (imgp->opened) {
			VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
			imgp->opened = false;
		}
		vput(newtextvp);
		imgp->vp = newtextvp = NULL;
		if (args->fname != NULL) {
			if (newtextdvp != NULL) {
				vrele(newtextdvp);
				newtextdvp = NULL;
			}
			NDFREE_PNBUF(&nd);
			free(newbinname, M_PARGS);
			newbinname = NULL;
		}
		vm_object_deallocate(imgp->object);
		imgp->object = NULL;
		execve_nosetid(imgp);
		imgp->execpath = NULL;
		free(imgp->freepath, M_TEMP);
		imgp->freepath = NULL;
		/* set new name to that of the interpreter */
		if (imgp->interpreter_vp) {
			args->fname = NULL;
		} else {
			args->fname = imgp->interpreter_name;
		}
		goto interpret;
	}

	/*
	 * NB: We unlock the vnode here because it is believed that none
	 * of the sv_copyout_strings/sv_fixup operations require the vnode.
	 */
	VOP_UNLOCK(imgp->vp);

	if (disallow_high_osrel &&
	    P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
		error = ENOEXEC;
		uprintf("Osrel %d for image %s too high\n", p->p_osrel,
		    imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Copy out strings (args and env) and initialize stack base.
	 */
	error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Stack setup.
	 */
	error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * For security and other reasons, the file descriptor table cannot be
	 * shared after an exec.
	 */
	fdunshare(td);
	pdunshare(td);
	/* close files on exec */
	fdcloseexec(td);

	/*
	 * Malloc things before we need locks.
	 */
	i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
	/* Cache arguments if they fit inside our allowance */
	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
		newargs = pargs_alloc(i);
		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
	}

	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec. The new process gets a copy of the old
	 * handlers. In execsigs(), the new process will have its signals
	 * reset.
	 */
	if (sigacts_shared(p->p_sigacts)) {
		oldsigacts = p->p_sigacts;
		newsigacts = sigacts_alloc();
		sigacts_copy(newsigacts, oldsigacts);
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);

	PROC_LOCK(p);
	if (oldsigacts)
		p->p_sigacts = newsigacts;
	/* Stop profiling */
	stopprofclock(p);

	/* reset caught signals */
	execsigs(p);

	/* name this process - nameiexec(p, ndp) */
	bzero(p->p_comm, sizeof(p->p_comm));
	if (args->fname)
		bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
		    min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
	else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
		bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
	bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif

	/*
	 * Mark as execed, wake up the process that vforked (if any), and
	 * tell it that it now has its own resources back.
	 */
	p->p_flag |= P_EXEC;
	if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
		p->p_flag2 &= ~P2_NOTRACE;
	if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
		p->p_flag2 &= ~P2_STKGAP_DISABLE;
	p->p_flag2 &= ~(P2_MEMBAR_PRIVE | P2_MEMBAR_PRIVE_SYNCORE |
	    P2_MEMBAR_GLOBE);
	if (p->p_flag & P_PPWAIT) {
		p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
		cv_broadcast(&p->p_pwait);
		/* STOPs are no longer ignored, arrange for AST */
		signotify(td);
	}

	if ((imgp->sysent->sv_setid_allowed != NULL &&
	    !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
	    (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
		execve_nosetid(imgp);

	/*
	 * Implement image setuid/setgid installation.
	 */
	if (imgp->credential_setid) {
		/*
		 * Turn off syscall tracing for set-id programs, except for
		 * root.  Record any set-id flags first to make sure that
		 * we do not regain any tracing during a possible block.
		 */
		setsugid(p);
#ifdef KTRACE
		kiop = ktrprocexec(p);
#endif
		/*
		 * Close any file descriptors 0..2 that reference procfs,
		 * then make sure file descriptors 0..2 are in use.
		 *
		 * Both fdsetugidsafety() and fdcheckstd() may call functions
		 * taking sleepable locks, so temporarily drop our locks.
		 */
		PROC_UNLOCK(p);
		VOP_UNLOCK(imgp->vp);
		fdsetugidsafety(td);
		error = fdcheckstd(td);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto exec_fail_dealloc;
		PROC_LOCK(p);
#ifdef MAC
		if (will_transition) {
			mac_vnode_execve_transition(oldcred, imgp->newcred,
			    imgp->vp, interpvplabel, imgp);
		}
#endif
	} else {
		if (oldcred->cr_uid == oldcred->cr_ruid &&
		    oldcred->cr_gid == oldcred->cr_rgid)
			p->p_flag &= ~P_SUGID;
	}
	/*
	 * Set the new credentials.
	 */
	if (imgp->newcred != NULL) {
		proc_set_cred(p, imgp->newcred);
		crfree(oldcred);
		oldcred = NULL;
	}

	/*
	 * Store the vp for use in kern.proc.pathname.  This vnode was
	 * referenced by namei() or by the fexecve variant of fname handling.
	 */
	oldtextvp = p->p_textvp;
	p->p_textvp = newtextvp;
	oldtextdvp = p->p_textdvp;
	p->p_textdvp = newtextdvp;
	newtextdvp = NULL;
	oldbinname = p->p_binname;
	p->p_binname = newbinname;
	newbinname = NULL;

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exec if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exec)
		dtrace_fasttrap_exec(p);
#endif

	/*
	 * Notify others that we exec'd, and clear the P_INEXEC flag
	 * as we're now a bona fide freshly-execed process.
	 */
	KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
	p->p_flag &= ~P_INEXEC;

	/* clear "fork but no exec" flag, as we _are_ execing */
	p->p_acflag &= ~AFORK;

	/*
	 * Free any previous argument cache and replace it with
	 * the new argument cache, if any.
	 */
	oldargs = p->p_args;
	p->p_args = newargs;
	newargs = NULL;

	PROC_UNLOCK(p);

#ifdef	HWPMC_HOOKS
	/*
	 * Check if system-wide sampling is in effect or if the
	 * current process is using PMCs.  If so, do exec() time
	 * processing.  This processing needs to happen AFTER the
	 * P_INEXEC flag is cleared.
	 */
	if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
		VOP_UNLOCK(imgp->vp);
		pe.pm_credentialschanged = credential_changing;
		pe.pm_baseaddr = imgp->reloc_base;
		pe.pm_dynaddr = imgp->et_dyn_addr;

		PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
#endif

	/* Set values passed into the program in registers. */
	(*p->p_sysent->sv_setregs)(td, imgp, stack_base);

	VOP_MMAPPED(imgp->vp);

	SDT_PROBE1(proc, , , exec__success, args->fname);

exec_fail_dealloc:
	if (error != 0) {
		p->p_osrel = orig_osrel;
		p->p_fctl0 = orig_fctl0;
		p->p_elf_brandinfo = orig_brandinfo;
	}

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	if (imgp->vp != NULL) {
		if (imgp->opened)
			VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(imgp->vp);
		if (error != 0)
			vput(imgp->vp);
		else
			VOP_UNLOCK(imgp->vp);
		if (args->fname != NULL)
			NDFREE_PNBUF(&nd);
		if (newtextdvp != NULL)
			vrele(newtextdvp);
		free(newbinname, M_PARGS);
	}

	if (imgp->object != NULL)
		vm_object_deallocate(imgp->object);

	free(imgp->freepath, M_TEMP);

	if (error == 0) {
		if (p->p_ptevents & PTRACE_EXEC) {
			PROC_LOCK(p);
			if (p->p_ptevents & PTRACE_EXEC)
				td->td_dbgflags |= TDB_EXEC;
			PROC_UNLOCK(p);
		}
	} else {
exec_fail:
		/* we're done here, clear P_INEXEC */
		PROC_LOCK(p);
		p->p_flag &= ~P_INEXEC;
		PROC_UNLOCK(p);

		SDT_PROBE1(proc, , , exec__failure, error);
	}

	if (imgp->newcred != NULL && oldcred != NULL)
		crfree(imgp->newcred);

#ifdef MAC
	mac_execve_exit(imgp);
	mac_execve_interpreter_exit(interpvplabel);
#endif
	exec_free_args(args);

	/*
	 * Handle deferred decrement of ref counts.
	 */
	if (oldtextvp != NULL)
		vrele(oldtextvp);
	if (oldtextdvp != NULL)
		vrele(oldtextdvp);
	free(oldbinname, M_PARGS);
#ifdef KTRACE
	ktr_io_params_free(kiop);
#endif
	pargs_drop(oldargs);
	pargs_drop(newargs);
	if (oldsigacts != NULL)
		sigacts_free(oldsigacts);
	if (euip != NULL)
		uifree(euip);

	if (error && imgp->vmspace_destroyed) {
		/* Sorry, there's no process anymore; exit gracefully. */
		exec_cleanup(td, oldvmspace);
		exit1(td, 0, SIGABRT);
		/* NOT REACHED */
	}

#ifdef KTRACE
	if (error == 0)
		ktrprocctor(p);
#endif

	/*
	 * We don't want cpu_set_syscall_retval() to overwrite any of
	 * the register values put in place by exec_setregs().
	 * Implementations of cpu_set_syscall_retval() will leave
	 * registers unmodified when returning EJUSTRETURN.
	 */
	return (error == 0 ? EJUSTRETURN : error);
}

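/*
 * Drop the reference on the pre-exec vmspace.  TDP_EXECVMSPC is set
 * when the exec installed a brand-new vmspace, in which case the old
 * one must be freed here; otherwise the old vmspace was reused and
 * there is nothing to release.
 */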
void
exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
{
	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
		KASSERT(td->td_proc->p_vmspace != oldvmspace,
		    ("oldvmspace still used"));
		vmspace_free(oldvmspace);
		td->td_pflags &= ~TDP_EXECVMSPC;
	}
}

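/*
 * Grab, validate, and wire the first page of the image, and map it into
 * the kernel through an sf_buf so that image activators can examine the
 * header via imgp->image_header.
 */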
int
exec_map_first_page(struct image_params *imgp)
{
	vm_object_t object;
	vm_page_t m;
	int error;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
#if VM_NRESERVLEVEL > 0
	if ((object->flags & OBJ_COLORED) == 0) {
		VM_OBJECT_WLOCK(object);
		vm_object_color(object, 0);
		VM_OBJECT_WUNLOCK(object);
	}
#endif
	error = vm_page_grab_valid_unlocked(&m, object, 0,
	    VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);

	if (error != VM_PAGER_OK)
		return (EIO);
	imgp->firstpage = sf_buf_alloc(m, 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}

void
exec_unmap_first_page(struct image_params *imgp)
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		vm_page_unwire(m, PQ_ACTIVE);
	}
}

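/*
 * An sv_onexec_old implementation: tear down the sigfastblock and
 * umtx state inherited from the previous program image.
 */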
void
exec_onexec_old(struct thread *td)
{
	sigfastblock_clear(td);
	umtx_exec(td->td_proc);
}

/*
 * This is an optimization which removes the unmanaged shared page
 * mapping. In combination with pmap_remove_pages(), which cleans all
 * managed mappings in the process' vmspace pmap, no work will be left
 * for pmap_remove(min, max).
 */
void
exec_free_abi_mappings(struct proc *p)
{
	struct vmspace *vmspace;

	vmspace = p->p_vmspace;
	if (refcount_load(&vmspace->vm_refcnt) != 1)
		return;

	if (!PROC_HAS_SHP(p))
		return;

	pmap_remove(vmspace_pmap(vmspace), vmspace->vm_shp_base,
	    vmspace->vm_shp_base + p->p_sysent->sv_shared_page_len);
}

/*
 * Run down the current address space and install a new one.
 */
int
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
{
	int error;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	struct thread *td = curthread;
	vm_offset_t sv_minuser;
	vm_map_t map;

	imgp->vmspace_destroyed = true;
	imgp->sysent = sv;

	if (p->p_sysent->sv_onexec_old != NULL)
		p->p_sysent->sv_onexec_old(td);
	itimers_exec(p);

	EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);

	/*
	 * Blow away the entire process VM if the address space is not
	 * shared; otherwise, create a new VM space so that other threads
	 * are not disrupted.
	 */
	map = &vmspace->vm_map;
	if (map_at_zero)
		sv_minuser = sv->sv_minuser;
	else
		sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
	if (refcount_load(&vmspace->vm_refcnt) == 1 &&
	    vm_map_min(map) == sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser &&
	    cpu_exec_vmspace_reuse(p, map)) {
		exec_free_abi_mappings(p);
		shmexit(vmspace);
		pmap_remove_pages(vmspace_pmap(vmspace));
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
		/*
		 * An exec terminates mlockall(MCL_FUTURE).
		 * ASLR and W^X states must be re-evaluated.
		 */
		vm_map_lock(map);
		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
		    MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
		vm_map_unlock(map);
	} else {
		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
		if (error)
			return (error);
		vmspace = p->p_vmspace;
		map = &vmspace->vm_map;
	}
	map->flags |= imgp->map_flags;

	return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
}

/*
 * Compute the stack size limit and map the main process stack.
 * Map the shared page.
 */
int
exec_map_stack(struct image_params *imgp)
{
	struct rlimit rlim_stack;
	struct sysentvec *sv;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vmspace;
	vm_offset_t stack_addr, stack_top;
	vm_offset_t sharedpage_addr;
	u_long ssiz;
	int error, find_space, stack_off;
	vm_prot_t stack_prot;
	vm_object_t obj;

	p = imgp->proc;
	sv = p->p_sysent;

	if (imgp->stack_sz != 0) {
		ssiz = trunc_page(imgp->stack_sz);
		PROC_LOCK(p);
		lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
		PROC_UNLOCK(p);
		if (ssiz > rlim_stack.rlim_max)
			ssiz = rlim_stack.rlim_max;
		if (ssiz > rlim_stack.rlim_cur) {
			rlim_stack.rlim_cur = ssiz;
			kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
		}
	} else if (sv->sv_maxssiz != NULL) {
		ssiz = *sv->sv_maxssiz;
	} else {
		ssiz = maxssiz;
	}

	vmspace = p->p_vmspace;
	map = &vmspace->vm_map;

	stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
	    imgp->stack_prot : sv->sv_stackprot;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));
		find_space = VMFS_ANY_SPACE;
	} else {
		stack_addr = sv->sv_usrstack - ssiz;
		find_space = VMFS_NO_SPACE;
	}
	error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
	    sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
	    MAP_STACK_GROWS_DOWN);
	if (error != KERN_SUCCESS) {
		uprintf("%s: mapping stack size %#jx prot %#x "
		    "failed, mach error %d errno %d\n", __func__,
		    (uintmax_t)ssiz, stack_prot, error, vm_mmap_to_errno(error));
		return (vm_mmap_to_errno(error));
	}

	stack_top = stack_addr + ssiz;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		/* Randomize within the first page of the stack. */
		arc4rand(&stack_off, sizeof(stack_off), 0);
		stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
	}

	/* Map a shared page */
	obj = sv->sv_shared_page_obj;
	if (obj == NULL) {
		sharedpage_addr = 0;
		goto out;
	}

	/*
	 * If randomization is disabled then the shared page will
	 * be mapped at the address specified in sysentvec.
	 * Otherwise any address above the .data section can be selected.
	 * The same logic is used for stack address randomization.
	 * If address randomization is applied, map a guard page
	 * at the top of UVA.
	 */
	vm_object_reference(obj);
	if ((imgp->imgp_flags & IMGP_ASLR_SHARED_PAGE) != 0) {
		sharedpage_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));

		error = vm_map_fixed(map, NULL, 0,
		    sv->sv_maxuser - PAGE_SIZE, PAGE_SIZE,
		    VM_PROT_NONE, VM_PROT_NONE, MAP_CREATE_GUARD);
		if (error != KERN_SUCCESS) {
			/*
			 * This is not fatal, so let's just print a warning
			 * and continue.
			 */
			uprintf("%s: Mapping guard page at the top of UVA failed,"
			    " mach error %d errno %d\n",
			    __func__, error, vm_mmap_to_errno(error));
		}

		error = vm_map_find(map, obj, 0,
		    &sharedpage_addr, sv->sv_shared_page_len,
		    sv->sv_maxuser, VMFS_ANY_SPACE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	} else {
		sharedpage_addr = sv->sv_shared_page_base;
		vm_map_fixed(map, obj, 0,
		    sharedpage_addr, sv->sv_shared_page_len,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	}
	if (error != KERN_SUCCESS) {
		uprintf("%s: mapping shared page at addr: %p "
		    "failed, mach error %d errno %d\n", __func__,
		    (void *)sharedpage_addr, error, vm_mmap_to_errno(error));
		vm_object_deallocate(obj);
		return (vm_mmap_to_errno(error));
	}
out:
	/*
	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
	 * are still used to enforce the stack rlimit on the process stack.
	 */
	vmspace->vm_maxsaddr = (char *)stack_addr;
	vmspace->vm_stacktop = stack_top;
	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
	vmspace->vm_shp_base = sharedpage_addr;

	return (0);
}

/*
 * Copy out argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, const char *fname,
    enum uio_seg segflg, char **argv, char **envv)
{
	u_long arg, env;
	int error;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);

	/*
	 * Allocate demand-paged memory for the file name, argument, and
	 * environment strings.
	 */
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	/*
	 * Copy the file name.
	 */
	error = exec_args_add_fname(args, fname, segflg);
	if (error != 0)
		goto err_exit;

	/*
	 * extract arguments first
	 */
	for (;;) {
		error = fueword(argv++, &arg);
		if (error == -1) {
			error = EFAULT;
			goto err_exit;
		}
		if (arg == 0)
			break;
		error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
		    UIO_USERSPACE);
		if (error != 0)
			goto err_exit;
	}

	/*
	 * extract environment strings
	 */
	if (envv) {
		for (;;) {
			error = fueword(envv++, &env);
			if (error == -1) {
				error = EFAULT;
				goto err_exit;
			}
			if (env == 0)
				break;
			error = exec_args_add_env(args,
			    (char *)(uintptr_t)env, UIO_USERSPACE);
			if (error != 0)
				goto err_exit;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}

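/*
 * Pre-allocated KVA ranges backing image_args string buffers.
 * exec_prealloc_args_kva() allocates exec_map_entries ranges of
 * exec_map_entry_size bytes each at boot; a free range is cached
 * per-CPU, with the mutex-protected freelist as fallback.  The
 * generation counter lets the vm_lowmem handler apply MADV_FREE to
 * each range exactly once per low-memory event.
 */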
struct exec_args_kva {
	vm_offset_t addr;
	u_int gen;
	SLIST_ENTRY(exec_args_kva) next;
};

DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);

static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
static struct mtx exec_args_kva_mtx;
static u_int exec_args_gen;

static void
exec_prealloc_args_kva(void *arg __unused)
{
	struct exec_args_kva *argkva;
	u_int i;

	SLIST_INIT(&exec_args_kva_freelist);
	mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
	for (i = 0; i < exec_map_entries; i++) {
		argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
		argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
		argkva->gen = exec_args_gen;
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
	}
}
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);

static vm_offset_t
exec_alloc_args_kva(void **cookie)
{
	struct exec_args_kva *argkva;

	argkva = (void *)atomic_readandclear_ptr(
	    (uintptr_t *)DPCPU_PTR(exec_args_kva));
	if (argkva == NULL) {
		mtx_lock(&exec_args_kva_mtx);
		while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
			(void)mtx_sleep(&exec_args_kva_freelist,
			    &exec_args_kva_mtx, 0, "execkva", 0);
		SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
		mtx_unlock(&exec_args_kva_mtx);
	}
	kasan_mark((void *)argkva->addr, exec_map_entry_size,
	    exec_map_entry_size, 0);
	*(struct exec_args_kva **)cookie = argkva;
	return (argkva->addr);
}

static void
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
{
	vm_offset_t base;

	base = argkva->addr;
	kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
	    KASAN_EXEC_ARGS_FREED);
	if (argkva->gen != gen) {
		(void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
		    MADV_FREE);
		argkva->gen = gen;
	}
	if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
	    (uintptr_t)NULL, (uintptr_t)argkva)) {
		mtx_lock(&exec_args_kva_mtx);
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
		wakeup_one(&exec_args_kva_freelist);
		mtx_unlock(&exec_args_kva_mtx);
	}
}

static void
exec_free_args_kva(void *cookie)
{

	exec_release_args_kva(cookie, exec_args_gen);
}

static void
exec_args_kva_lowmem(void *arg __unused)
{
	SLIST_HEAD(, exec_args_kva) head;
	struct exec_args_kva *argkva;
	u_int gen;
	int i;

	gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;

	/*
	 * Force an madvise of each KVA range. Any currently allocated ranges
	 * will have MADV_FREE applied once they are freed.
	 */
	SLIST_INIT(&head);
	mtx_lock(&exec_args_kva_mtx);
	SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
	mtx_unlock(&exec_args_kva_mtx);
	while ((argkva = SLIST_FIRST(&head)) != NULL) {
		SLIST_REMOVE_HEAD(&head, next);
		exec_release_args_kva(argkva, gen);
	}

	CPU_FOREACH(i) {
		argkva = (void *)atomic_readandclear_ptr(
		    (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
		if (argkva != NULL)
			exec_release_args_kva(argkva, gen);
	}
}
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
    EVENTHANDLER_PRI_ANY);

/*
 * Allocate temporary demand-paged, zero-filled memory for the file name,
 * argument, and environment strings.
 */
int
exec_alloc_args(struct image_args *args)
{

	args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
	return (0);
}

void
exec_free_args(struct image_args *args)
{

	if (args->buf != NULL) {
		exec_free_args_kva(args->bufkva);
		args->buf = NULL;
	}
	if (args->fname_buf != NULL) {
		free(args->fname_buf, M_TEMP);
		args->fname_buf = NULL;
	}
}

/*
 * A set of functions to fill struct image_args.
 *
 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
 * fname) before the other functions.  All exec_args_add_arg() calls must
 * be made before any exec_args_add_env() calls.  exec_args_adjust_args()
 * may be called any time after exec_args_add_fname().
 *
 * exec_args_add_fname() - install path to be executed
 * exec_args_add_arg() - append an argument string
 * exec_args_add_env() - append an env string
 * exec_args_adjust_args() - adjust location of the argument list to
 *                           allow new arguments to be prepended
 */
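/*
 * As a sketch, a caller filling in image_args by hand (compare
 * exec_copyin_args() above) proceeds roughly as:
 *
 *	exec_alloc_args(args);
 *	exec_args_add_fname(args, fname, segflg);
 *	exec_args_add_arg(args, argp, segflg);	(once per argument)
 *	exec_args_add_env(args, envp, segflg);	(once per variable)
 *
 * with exec_free_args() called on any failure.
 */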
int
exec_args_add_fname(struct image_args *args, const char *fname,
    enum uio_seg segflg)
{
	int error;
	size_t length;

	KASSERT(args->fname == NULL, ("fname already appended"));
	KASSERT(args->endp == NULL, ("already appending to args"));

	if (fname != NULL) {
		args->fname = args->buf;
		error = segflg == UIO_SYSSPACE ?
		    copystr(fname, args->fname, PATH_MAX, &length) :
		    copyinstr(fname, args->fname, PATH_MAX, &length);
		if (error != 0)
			return (error == ENAMETOOLONG ? E2BIG : error);
	} else
		length = 0;

	/* Set up for _arg_*()/_env_*() */
	args->endp = args->buf + length;
	/* begin_argv must be set and kept updated */
	args->begin_argv = args->endp;
	KASSERT(exec_map_entry_size - length >= ARG_MAX,
	    ("too little space remaining for arguments %zu < %zu",
	    exec_map_entry_size - length, (size_t)ARG_MAX));
	args->stringspace = ARG_MAX;

	return (0);
}

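/*
 * Common helper for exec_args_add_arg() and exec_args_add_env(): copy
 * one NUL-terminated string into the args buffer, charge it against
 * the remaining stringspace, and bump the relevant counter.
 */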
static int
exec_args_add_str(struct image_args *args, const char *str,
    enum uio_seg segflg, int *countp)
{
	int error;
	size_t length;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	error = (segflg == UIO_SYSSPACE) ?
	    copystr(str, args->endp, args->stringspace, &length) :
	    copyinstr(str, args->endp, args->stringspace, &length);
	if (error != 0)
		return (error == ENAMETOOLONG ? E2BIG : error);
	args->stringspace -= length;
	args->endp += length;
	(*countp)++;

	return (0);
}

int
exec_args_add_arg(struct image_args *args, const char *argp,
    enum uio_seg segflg)
{

	KASSERT(args->envc == 0, ("appending args after env"));

	return (exec_args_add_str(args, argp, segflg, &args->argc));
}

int
exec_args_add_env(struct image_args *args, const char *envp,
    enum uio_seg segflg)
{

	if (args->envc == 0)
		args->begin_envv = args->endp;

	return (exec_args_add_str(args, envp, segflg, &args->envc));
}

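/*
 * Slide the argument/environment block within the string buffer:
 * 'consume' bytes are dropped from the front and 'extend' bytes of
 * fresh space are opened up, so that callers (e.g. interpreter
 * activators) can prepend new arguments.
 */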
int
exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
{
	ssize_t offset;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	offset = extend - consume;
	if (args->stringspace < offset)
		return (E2BIG);
	memmove(args->begin_argv + extend, args->begin_argv + consume,
	    args->endp - args->begin_argv + consume);
	if (args->envc > 0)
		args->begin_envv += offset;
	args->endp += offset;
	args->stringspace -= offset;
	return (0);
}

char *
exec_args_get_begin_envv(struct image_args *args)
{

	KASSERT(args->endp != NULL, ("endp not initialized"));

	if (args->envc > 0)
		return (args->begin_envv);
	return (args->endp);
}

/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables. Return a pointer to the base so that it can be used
 * as the initial stack pointer.
 */
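/*
 * A sketch of the resulting layout at the top of the new stack, from
 * higher to lower addresses (entries appear only when applicable):
 *
 *	ps_strings
 *	signal trampoline	(if there is no shared page)
 *	execpath		(for rtld)
 *	SSP canary
 *	pagesizes array
 *	argument and environment strings
 *	ELF auxargs array
 *	argv[] and envv[] vectors	<- initial stack base
 */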
int
exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
{
	int argc, envc;
	char **vectp;
	char *stringp;
	uintptr_t destp, ustringp;
	struct ps_strings *arginfo;
	struct proc *p;
	struct sysentvec *sysent;
	size_t execpath_len;
	int error, szsigcode;
	char canary[sizeof(long) * 8];

	p = imgp->proc;
	sysent = p->p_sysent;

	destp =	PROC_PS_STRINGS(p);
	arginfo = imgp->ps_strings = (void *)destp;

	/*
	 * Install sigcode.
	 */
	if (sysent->sv_shared_page_base == 0 && sysent->sv_szsigcode != NULL) {
		szsigcode = *(sysent->sv_szsigcode);
		destp -= szsigcode;
		destp = rounddown2(destp, sizeof(void *));
		error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
		if (error != 0)
			return (error);
	}

	/*
	 * Copy the image path for the rtld.
	 */
	if (imgp->execpath != NULL && imgp->auxargs != NULL) {
		execpath_len = strlen(imgp->execpath) + 1;
		destp -= execpath_len;
		destp = rounddown2(destp, sizeof(void *));
		imgp->execpathp = (void *)destp;
		error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
		if (error != 0)
			return (error);
	}

	/*
	 * Prepare the canary for SSP.
	 */
	arc4rand(canary, sizeof(canary), 0);
	destp -= sizeof(canary);
	imgp->canary = (void *)destp;
	error = copyout(canary, imgp->canary, sizeof(canary));
	if (error != 0)
		return (error);
	imgp->canarylen = sizeof(canary);

	/*
	 * Prepare the pagesizes array.
	 */
	imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
	destp -= imgp->pagesizeslen;
	destp = rounddown2(destp, sizeof(void *));
	imgp->pagesizes = (void *)destp;
	error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
	if (error != 0)
		return (error);

	/*
	 * Allocate room for the argument and environment strings.
	 */
	destp -= ARG_MAX - imgp->args->stringspace;
	destp = rounddown2(destp, sizeof(void *));
	ustringp = destp;

	if (imgp->auxargs) {
		/*
		 * Allocate room on the stack for the ELF auxargs
		 * array.  It has up to AT_COUNT entries.
		 */
		destp -= AT_COUNT * sizeof(Elf_Auxinfo);
		destp = rounddown2(destp, sizeof(void *));
	}

	vectp = (char **)destp;

	/*
	 * Allocate room for the argv[] and env vectors including the
	 * terminating NULL pointers.
	 */
	vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

	/*
	 * vectp also becomes our initial stack base
	 */
	*stack_base = (uintptr_t)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	error = copyout(stringp, (void *)ustringp,
	    ARG_MAX - imgp->args->stringspace);
	if (error != 0)
		return (error);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	imgp->argv = vectp;
	if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nargvstr, argc) != 0)
		return (EFAULT);

	/*
	 * Fill in argument portion of vector table.
	 */
	for (; argc > 0; --argc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	if (suword(vectp++, 0) != 0)
		return (EFAULT);

	imgp->envv = vectp;
	if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nenvstr, envc) != 0)
		return (EFAULT);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* end of vector table is a null pointer */
	if (suword(vectp, 0) != 0)
		return (EFAULT);

	if (imgp->auxargs) {
		vectp++;
		error = imgp->sysent->sv_copyout_auxargs(imgp,
		    (uintptr_t)vectp);
		if (error != 0)
			return (error);
	}

	return (0);
}

/*
 * Check permissions of file to execute.
 *	Called with imgp->vp locked.
 *	Return 0 for success or error code on failure.
 */
int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->vp;
	struct vattr *attr = imgp->attr;
	struct thread *td;
	int error;

	td = curthread;

	/* Get file attributes */
	error = VOP_GETATTR(vp, attr, td->td_ucred);
	if (error)
		return (error);

#ifdef MAC
	error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
	if (error)
		return (error);
#endif

	/*
	 * 1) Check if file execution is disabled for the filesystem that
	 *    this file resides on.
	 * 2) Ensure that at least one execute bit is on. Otherwise, a
	 *    privileged user will always succeed, and we don't want this
	 *    to happen unless the file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 *  Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 *
	 * Add a text reference now so no one can write to the
	 * executable while we're activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		return (error);
	imgp->textset = true;

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error == 0)
		imgp->opened = true;
	return (error);
}

/*
 * Exec handler registration
 */
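/*
 * Image activator modules typically do not call these directly but use
 * the EXEC_SET() macro from <sys/imgact.h>, which registers the
 * handler on module load and unregisters it on unload.
 */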
int
exec_register(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	u_int count = 2;	/* New slot and trailing NULL */

	if (execsw)
		for (es = execsw; *es; es++)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	if (execsw)
		for (es = execsw; *es; es++)
			*xs++ = *es;
	*xs++ = execsw_arg;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

int
exec_unregister(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 1;

	if (execsw == NULL)
		panic("unregister with no handlers left?\n");

	for (es = execsw; *es; es++) {
		if (*es == execsw_arg)
			break;
	}
	if (*es == NULL)
		return (ENOENT);
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			*xs++ = *es;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
{
	size_t chunk_len;
	int error;

	while (len > 0) {
		chunk_len = MIN(len, CORE_BUF_SIZE);

		/*
		 * We can get an EFAULT error here.
		 * In that case, zero out the current chunk of the segment.
		 */
		error = copyin(base, buf, chunk_len);
		if (error != 0)
			bzero(buf, chunk_len);
		error = compressor_write(cp->comp, buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

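/*
 * Write a buffer at the given offset in the core file, in chunks,
 * using the credentials cached in the coredump_params.
 */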
int
core_write(struct coredump_params *cp, const void *base, size_t len,
    off_t offset, enum uio_seg seg, size_t *resid)
{

	return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
	    len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
	    cp->active_cred, cp->file_cred, resid, cp->td));
}

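/*
 * Dump a page-aligned range of user memory to the core file.  When a
 * compressor is configured, the range is streamed through
 * compress_chunk(); otherwise readable runs are written directly and
 * unreadable runs become holes, produced by truncating the file out to
 * the proper offset.
 */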
int
core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
    void *tmpbuf)
{
	vm_map_t map;
	struct mount *mp;
	size_t resid, runlen;
	int error;
	bool success;

	KASSERT((uintptr_t)base % PAGE_SIZE == 0,
	    ("%s: user address %p is not page-aligned", __func__, base));

	if (cp->comp != NULL)
		return (compress_chunk(cp, base, tmpbuf, len));

	map = &cp->td->td_proc->p_vmspace->vm_map;
	for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
		/*
		 * Attempt to page in all virtual pages in the range.  If a
		 * virtual page is not backed by the pager, it is represented as
		 * a hole in the file.  This can occur with zero-filled
		 * anonymous memory or truncated files, for example.
		 */
		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
			if (core_dump_can_intr && curproc_sigkilled())
				return (EINTR);
			error = vm_fault(map, (uintptr_t)base + runlen,
			    VM_PROT_READ, VM_FAULT_NOFILL, NULL);
			if (runlen == 0)
				success = error == KERN_SUCCESS;
			else if ((error == KERN_SUCCESS) != success)
				break;
		}

		if (success) {
			error = core_write(cp, base, runlen, offset,
			    UIO_USERSPACE, &resid);
			if (error != 0) {
				if (error != EFAULT)
					break;

				/*
				 * EFAULT may be returned if the user mapping
				 * could not be accessed, e.g., because a mapped
				 * file has been truncated.  Skip the page if no
				 * progress was made, to protect against a
				 * hypothetical scenario where vm_fault() was
				 * successful but core_write() returns EFAULT
				 * anyway.
				 */
				runlen -= resid;
				if (runlen == 0) {
					success = false;
					runlen = PAGE_SIZE;
				}
			}
		}
		if (!success) {
			error = vn_start_write(cp->vp, &mp, V_WAIT);
			if (error != 0)
				break;
			vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
			error = vn_truncate_locked(cp->vp, offset + runlen,
			    false, cp->td->td_ucred);
			VOP_UNLOCK(cp->vp);
			vn_finished_write(mp);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Drain into a core file.
 */
int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *cp;
	struct proc *p;
	int error, locked;

	cp = arg;
	p = cp->td->td_proc;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held. Draining with the
	 * non-sleepable lock held is unsafe. The lock is needed for
	 * those routines when dumping a live process. In our case we
	 * can safely release the lock before draining and acquire
	 * again after.
	 */
	locked = PROC_LOCKED(p);
	if (locked)
		PROC_UNLOCK(p);
	if (cp->comp != NULL)
		error = compressor_write(cp->comp, __DECONST(char *, data),
		    len);
	else
		error = core_write(cp, __DECONST(void *, data), len, cp->offset,
		    UIO_SYSSPACE, NULL);
	if (locked)
		PROC_LOCK(p);
	if (error != 0)
		return (-error);
	cp->offset += len;
	return (len);
}