1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 */
36
37#include <sys/cdefs.h>
38#include "opt_capsicum.h"
39#include "opt_ddb.h"
40#include "opt_ktrace.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44
45#include <sys/capsicum.h>
46#include <sys/conf.h>
47#include <sys/fcntl.h>
48#include <sys/file.h>
49#include <sys/filedesc.h>
50#include <sys/filio.h>
51#include <sys/jail.h>
52#include <sys/kernel.h>
53#include <sys/limits.h>
54#include <sys/lock.h>
55#include <sys/malloc.h>
56#include <sys/mount.h>
57#include <sys/mutex.h>
58#include <sys/namei.h>
59#include <sys/selinfo.h>
60#include <sys/poll.h>
61#include <sys/priv.h>
62#include <sys/proc.h>
63#include <sys/protosw.h>
64#include <sys/racct.h>
65#include <sys/resourcevar.h>
66#include <sys/sbuf.h>
67#include <sys/signalvar.h>
68#include <sys/kdb.h>
69#include <sys/smr.h>
70#include <sys/stat.h>
71#include <sys/sx.h>
72#include <sys/syscallsubr.h>
73#include <sys/sysctl.h>
74#include <sys/sysproto.h>
75#include <sys/unistd.h>
76#include <sys/user.h>
77#include <sys/vnode.h>
78#include <sys/ktrace.h>
79
80#include <net/vnet.h>
81
82#include <security/audit/audit.h>
83
84#include <vm/uma.h>
85#include <vm/vm.h>
86
87#include <ddb/ddb.h>
88
89static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
90static MALLOC_DEFINE(M_PWD, "pwd", "Descriptor table vnodes");
91static MALLOC_DEFINE(M_PWDDESC, "pwddesc", "Pwd descriptors");
92static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
93    "file desc to leader structures");
94static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
95MALLOC_DEFINE(M_FILECAPS, "filecaps", "descriptor capabilities");
96
97MALLOC_DECLARE(M_FADVISE);
98
99static __read_mostly uma_zone_t file_zone;
100static __read_mostly uma_zone_t filedesc0_zone;
101__read_mostly uma_zone_t pwd_zone;
102VFS_SMR_DECLARE;
103
104static int	closefp(struct filedesc *fdp, int fd, struct file *fp,
105		    struct thread *td, bool holdleaders, bool audit);
106static void	export_file_to_kinfo(struct file *fp, int fd,
107		    cap_rights_t *rightsp, struct kinfo_file *kif,
108		    struct filedesc *fdp, int flags);
109static int	fd_first_free(struct filedesc *fdp, int low, int size);
110static void	fdgrowtable(struct filedesc *fdp, int nfd);
111static void	fdgrowtable_exp(struct filedesc *fdp, int nfd);
112static void	fdunused(struct filedesc *fdp, int fd);
113static void	fdused(struct filedesc *fdp, int fd);
114static int	fget_unlocked_seq(struct thread *td, int fd,
115		    cap_rights_t *needrightsp, struct file **fpp, seqc_t *seqp);
116static int	getmaxfd(struct thread *td);
117static u_long	*filecaps_copy_prep(const struct filecaps *src);
118static void	filecaps_copy_finish(const struct filecaps *src,
119		    struct filecaps *dst, u_long *ioctls);
120static u_long 	*filecaps_free_prep(struct filecaps *fcaps);
121static void	filecaps_free_finish(u_long *ioctls);
122
123static struct pwd *pwd_alloc(void);
124
125/*
126 * Each process has:
127 *
128 * - An array of open file descriptors (fd_ofiles)
129 * - An array of file flags (fd_ofileflags)
130 * - A bitmap recording which descriptors are in use (fd_map)
131 *
132 * A process starts out with NDFILE descriptors.  The value of NDFILE has
133 * been selected based the historical limit of 20 open files, and an
134 * assumption that the majority of processes, especially short-lived
135 * processes like shells, will never need more.
136 *
137 * If this initial allocation is exhausted, a larger descriptor table and
138 * map are allocated dynamically, and the pointers in the process's struct
139 * filedesc are updated to point to those.  This is repeated every time
140 * the process runs out of file descriptors (provided it hasn't hit its
141 * resource limit).
142 *
143 * Since threads may hold references to individual descriptor table
144 * entries, the tables are never freed.  Instead, they are placed on a
145 * linked list and freed only when the struct filedesc is released.
146 */
147#define NDFILE		20
148#define NDSLOTSIZE	sizeof(NDSLOTTYPE)
149#define	NDENTRIES	(NDSLOTSIZE * __CHAR_BIT)
150#define NDSLOT(x)	((x) / NDENTRIES)
151#define NDBIT(x)	((NDSLOTTYPE)1 << ((x) % NDENTRIES))
152#define	NDSLOTS(x)	(((x) + NDENTRIES - 1) / NDENTRIES)
153
154#define	FILEDESC_FOREACH_FDE(fdp, _iterator, _fde)				\
155	struct filedesc *_fdp = (fdp);						\
156	int _lastfile = fdlastfile_single(_fdp);				\
157	for (_iterator = 0; _iterator <= _lastfile; _iterator++)		\
158		if ((_fde = &_fdp->fd_ofiles[_iterator])->fde_file != NULL)
159
160#define	FILEDESC_FOREACH_FP(fdp, _iterator, _fp)				\
161	struct filedesc *_fdp = (fdp);						\
162	int _lastfile = fdlastfile_single(_fdp);				\
163	for (_iterator = 0; _iterator <= _lastfile; _iterator++)		\
164		if ((_fp = _fdp->fd_ofiles[_iterator].fde_file) != NULL)
165
166/*
167 * SLIST entry used to keep track of ofiles which must be reclaimed when
168 * the process exits.
169 */
170struct freetable {
171	struct fdescenttbl *ft_table;
172	SLIST_ENTRY(freetable) ft_next;
173};
174
175/*
176 * Initial allocation: a filedesc structure + the head of SLIST used to
177 * keep track of old ofiles + enough space for NDFILE descriptors.
178 */
179
180struct fdescenttbl0 {
181	int	fdt_nfiles;
182	struct	filedescent fdt_ofiles[NDFILE];
183};
184
185struct filedesc0 {
186	struct filedesc fd_fd;
187	SLIST_HEAD(, freetable) fd_free;
188	struct	fdescenttbl0 fd_dfiles;
189	NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
190};
191
192/*
193 * Descriptor management.
194 */
195static int __exclusive_cache_line openfiles; /* actual number of open files */
196struct mtx sigio_lock;		/* mtx to protect pointers to sigio */
197void __read_mostly (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
198
199/*
200 * If low >= size, just return low. Otherwise find the first zero bit in the
201 * given bitmap, starting at low and not exceeding size - 1. Return size if
202 * not found.
203 */
204static int
205fd_first_free(struct filedesc *fdp, int low, int size)
206{
207	NDSLOTTYPE *map = fdp->fd_map;
208	NDSLOTTYPE mask;
209	int off, maxoff;
210
211	if (low >= size)
212		return (low);
213
214	off = NDSLOT(low);
215	if (low % NDENTRIES) {
216		mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
217		if ((mask &= ~map[off]) != 0UL)
218			return (off * NDENTRIES + ffsl(mask) - 1);
219		++off;
220	}
221	for (maxoff = NDSLOTS(size); off < maxoff; ++off)
222		if (map[off] != ~0UL)
223			return (off * NDENTRIES + ffsl(~map[off]) - 1);
224	return (size);
225}
226
227/*
228 * Find the last used fd.
229 *
230 * Call this variant if fdp can't be modified by anyone else (e.g, during exec).
231 * Otherwise use fdlastfile.
232 */
233int
234fdlastfile_single(struct filedesc *fdp)
235{
236	NDSLOTTYPE *map = fdp->fd_map;
237	int off, minoff;
238
239	off = NDSLOT(fdp->fd_nfiles - 1);
240	for (minoff = NDSLOT(0); off >= minoff; --off)
241		if (map[off] != 0)
242			return (off * NDENTRIES + flsl(map[off]) - 1);
243	return (-1);
244}
245
246int
247fdlastfile(struct filedesc *fdp)
248{
249
250	FILEDESC_LOCK_ASSERT(fdp);
251	return (fdlastfile_single(fdp));
252}
253
254static int
255fdisused(struct filedesc *fdp, int fd)
256{
257
258	KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
259	    ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
260
261	return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
262}
263
264/*
265 * Mark a file descriptor as used.
266 */
267static void
268fdused_init(struct filedesc *fdp, int fd)
269{
270
271	KASSERT(!fdisused(fdp, fd), ("fd=%d is already used", fd));
272
273	fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
274}
275
276static void
277fdused(struct filedesc *fdp, int fd)
278{
279
280	FILEDESC_XLOCK_ASSERT(fdp);
281
282	fdused_init(fdp, fd);
283	if (fd == fdp->fd_freefile)
284		fdp->fd_freefile++;
285}
286
287/*
288 * Mark a file descriptor as unused.
289 */
290static void
291fdunused(struct filedesc *fdp, int fd)
292{
293
294	FILEDESC_XLOCK_ASSERT(fdp);
295
296	KASSERT(fdisused(fdp, fd), ("fd=%d is already unused", fd));
297	KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
298	    ("fd=%d is still in use", fd));
299
300	fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
301	if (fd < fdp->fd_freefile)
302		fdp->fd_freefile = fd;
303}
304
305/*
306 * Free a file descriptor.
307 *
308 * Avoid some work if fdp is about to be destroyed.
309 */
310static inline void
311fdefree_last(struct filedescent *fde)
312{
313
314	filecaps_free(&fde->fde_caps);
315}
316
317static inline void
318fdfree(struct filedesc *fdp, int fd)
319{
320	struct filedescent *fde;
321
322	FILEDESC_XLOCK_ASSERT(fdp);
323	fde = &fdp->fd_ofiles[fd];
324#ifdef CAPABILITIES
325	seqc_write_begin(&fde->fde_seqc);
326#endif
327	fde->fde_file = NULL;
328#ifdef CAPABILITIES
329	seqc_write_end(&fde->fde_seqc);
330#endif
331	fdefree_last(fde);
332	fdunused(fdp, fd);
333}
334
335/*
336 * System calls on descriptors.
337 */
338#ifndef _SYS_SYSPROTO_H_
339struct getdtablesize_args {
340	int	dummy;
341};
342#endif
343/* ARGSUSED */
344int
345sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
346{
347#ifdef	RACCT
348	uint64_t lim;
349#endif
350
351	td->td_retval[0] = getmaxfd(td);
352#ifdef	RACCT
353	PROC_LOCK(td->td_proc);
354	lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
355	PROC_UNLOCK(td->td_proc);
356	if (lim < td->td_retval[0])
357		td->td_retval[0] = lim;
358#endif
359	return (0);
360}
361
362/*
363 * Duplicate a file descriptor to a particular value.
364 *
365 * Note: keep in mind that a potential race condition exists when closing
366 * descriptors from a shared descriptor table (via rfork).
367 */
368#ifndef _SYS_SYSPROTO_H_
369struct dup2_args {
370	u_int	from;
371	u_int	to;
372};
373#endif
374/* ARGSUSED */
375int
376sys_dup2(struct thread *td, struct dup2_args *uap)
377{
378
379	return (kern_dup(td, FDDUP_FIXED, 0, (int)uap->from, (int)uap->to));
380}
381
382/*
383 * Duplicate a file descriptor.
384 */
385#ifndef _SYS_SYSPROTO_H_
386struct dup_args {
387	u_int	fd;
388};
389#endif
390/* ARGSUSED */
391int
392sys_dup(struct thread *td, struct dup_args *uap)
393{
394
395	return (kern_dup(td, FDDUP_NORMAL, 0, (int)uap->fd, 0));
396}
397
398/*
399 * The file control system call.
400 */
401#ifndef _SYS_SYSPROTO_H_
402struct fcntl_args {
403	int	fd;
404	int	cmd;
405	long	arg;
406};
407#endif
408/* ARGSUSED */
409int
410sys_fcntl(struct thread *td, struct fcntl_args *uap)
411{
412
413	return (kern_fcntl_freebsd(td, uap->fd, uap->cmd, uap->arg));
414}
415
416int
417kern_fcntl_freebsd(struct thread *td, int fd, int cmd, intptr_t arg)
418{
419	struct flock fl;
420	struct __oflock ofl;
421	intptr_t arg1;
422	int error, newcmd;
423
424	error = 0;
425	newcmd = cmd;
426	switch (cmd) {
427	case F_OGETLK:
428	case F_OSETLK:
429	case F_OSETLKW:
430		/*
431		 * Convert old flock structure to new.
432		 */
433		error = copyin((void *)arg, &ofl, sizeof(ofl));
434		fl.l_start = ofl.l_start;
435		fl.l_len = ofl.l_len;
436		fl.l_pid = ofl.l_pid;
437		fl.l_type = ofl.l_type;
438		fl.l_whence = ofl.l_whence;
439		fl.l_sysid = 0;
440
441		switch (cmd) {
442		case F_OGETLK:
443			newcmd = F_GETLK;
444			break;
445		case F_OSETLK:
446			newcmd = F_SETLK;
447			break;
448		case F_OSETLKW:
449			newcmd = F_SETLKW;
450			break;
451		}
452		arg1 = (intptr_t)&fl;
453		break;
454	case F_GETLK:
455	case F_SETLK:
456	case F_SETLKW:
457	case F_SETLK_REMOTE:
458		error = copyin((void *)arg, &fl, sizeof(fl));
459		arg1 = (intptr_t)&fl;
460		break;
461	default:
462		arg1 = arg;
463		break;
464	}
465	if (error)
466		return (error);
467	error = kern_fcntl(td, fd, newcmd, arg1);
468	if (error)
469		return (error);
470	if (cmd == F_OGETLK) {
471		ofl.l_start = fl.l_start;
472		ofl.l_len = fl.l_len;
473		ofl.l_pid = fl.l_pid;
474		ofl.l_type = fl.l_type;
475		ofl.l_whence = fl.l_whence;
476		error = copyout(&ofl, (void *)arg, sizeof(ofl));
477	} else if (cmd == F_GETLK) {
478		error = copyout(&fl, (void *)arg, sizeof(fl));
479	}
480	return (error);
481}
482
483int
484kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
485{
486	struct filedesc *fdp;
487	struct flock *flp;
488	struct file *fp, *fp2;
489	struct filedescent *fde;
490	struct proc *p;
491	struct vnode *vp;
492	struct mount *mp;
493	struct kinfo_file *kif;
494	int error, flg, kif_sz, seals, tmp, got_set, got_cleared;
495	uint64_t bsize;
496	off_t foffset;
497
498	error = 0;
499	flg = F_POSIX;
500	p = td->td_proc;
501	fdp = p->p_fd;
502
503	AUDIT_ARG_FD(cmd);
504	AUDIT_ARG_CMD(cmd);
505	switch (cmd) {
506	case F_DUPFD:
507		tmp = arg;
508		error = kern_dup(td, FDDUP_FCNTL, 0, fd, tmp);
509		break;
510
511	case F_DUPFD_CLOEXEC:
512		tmp = arg;
513		error = kern_dup(td, FDDUP_FCNTL, FDDUP_FLAG_CLOEXEC, fd, tmp);
514		break;
515
516	case F_DUP2FD:
517		tmp = arg;
518		error = kern_dup(td, FDDUP_FIXED, 0, fd, tmp);
519		break;
520
521	case F_DUP2FD_CLOEXEC:
522		tmp = arg;
523		error = kern_dup(td, FDDUP_FIXED, FDDUP_FLAG_CLOEXEC, fd, tmp);
524		break;
525
526	case F_GETFD:
527		error = EBADF;
528		FILEDESC_SLOCK(fdp);
529		fde = fdeget_noref(fdp, fd);
530		if (fde != NULL) {
531			td->td_retval[0] =
532			    (fde->fde_flags & UF_EXCLOSE) ? FD_CLOEXEC : 0;
533			error = 0;
534		}
535		FILEDESC_SUNLOCK(fdp);
536		break;
537
538	case F_SETFD:
539		error = EBADF;
540		FILEDESC_XLOCK(fdp);
541		fde = fdeget_noref(fdp, fd);
542		if (fde != NULL) {
543			fde->fde_flags = (fde->fde_flags & ~UF_EXCLOSE) |
544			    (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
545			error = 0;
546		}
547		FILEDESC_XUNLOCK(fdp);
548		break;
549
550	case F_GETFL:
551		error = fget_fcntl(td, fd, &cap_fcntl_rights, F_GETFL, &fp);
552		if (error != 0)
553			break;
554		td->td_retval[0] = OFLAGS(fp->f_flag);
555		fdrop(fp, td);
556		break;
557
558	case F_SETFL:
559		error = fget_fcntl(td, fd, &cap_fcntl_rights, F_SETFL, &fp);
560		if (error != 0)
561			break;
562		if (fp->f_ops == &path_fileops) {
563			fdrop(fp, td);
564			error = EBADF;
565			break;
566		}
567		do {
568			tmp = flg = fp->f_flag;
569			tmp &= ~FCNTLFLAGS;
570			tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
571		} while (atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
572		got_set = tmp & ~flg;
573		got_cleared = flg & ~tmp;
574		tmp = fp->f_flag & FNONBLOCK;
575		error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
576		if (error != 0)
577			goto revert_f_setfl;
578		tmp = fp->f_flag & FASYNC;
579		error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
580		if (error == 0) {
581			fdrop(fp, td);
582			break;
583		}
584		atomic_clear_int(&fp->f_flag, FNONBLOCK);
585		tmp = 0;
586		(void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
587revert_f_setfl:
588		do {
589			tmp = flg = fp->f_flag;
590			tmp &= ~FCNTLFLAGS;
591			tmp |= got_cleared;
592			tmp &= ~got_set;
593		} while (atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
594		fdrop(fp, td);
595		break;
596
597	case F_GETOWN:
598		error = fget_fcntl(td, fd, &cap_fcntl_rights, F_GETOWN, &fp);
599		if (error != 0)
600			break;
601		error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
602		if (error == 0)
603			td->td_retval[0] = tmp;
604		fdrop(fp, td);
605		break;
606
607	case F_SETOWN:
608		error = fget_fcntl(td, fd, &cap_fcntl_rights, F_SETOWN, &fp);
609		if (error != 0)
610			break;
611		tmp = arg;
612		error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
613		fdrop(fp, td);
614		break;
615
616	case F_SETLK_REMOTE:
617		error = priv_check(td, PRIV_NFS_LOCKD);
618		if (error != 0)
619			return (error);
620		flg = F_REMOTE;
621		goto do_setlk;
622
623	case F_SETLKW:
624		flg |= F_WAIT;
625		/* FALLTHROUGH F_SETLK */
626
627	case F_SETLK:
628	do_setlk:
629		flp = (struct flock *)arg;
630		if ((flg & F_REMOTE) != 0 && flp->l_sysid == 0) {
631			error = EINVAL;
632			break;
633		}
634
635		error = fget_unlocked(td, fd, &cap_flock_rights, &fp);
636		if (error != 0)
637			break;
638		if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
639			error = EBADF;
640			fdrop(fp, td);
641			break;
642		}
643
644		if (flp->l_whence == SEEK_CUR) {
645			foffset = foffset_get(fp);
646			if (foffset < 0 ||
647			    (flp->l_start > 0 &&
648			     foffset > OFF_MAX - flp->l_start)) {
649				error = EOVERFLOW;
650				fdrop(fp, td);
651				break;
652			}
653			flp->l_start += foffset;
654		}
655
656		vp = fp->f_vnode;
657		switch (flp->l_type) {
658		case F_RDLCK:
659			if ((fp->f_flag & FREAD) == 0) {
660				error = EBADF;
661				break;
662			}
663			if ((p->p_leader->p_flag & P_ADVLOCK) == 0) {
664				PROC_LOCK(p->p_leader);
665				p->p_leader->p_flag |= P_ADVLOCK;
666				PROC_UNLOCK(p->p_leader);
667			}
668			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
669			    flp, flg);
670			break;
671		case F_WRLCK:
672			if ((fp->f_flag & FWRITE) == 0) {
673				error = EBADF;
674				break;
675			}
676			if ((p->p_leader->p_flag & P_ADVLOCK) == 0) {
677				PROC_LOCK(p->p_leader);
678				p->p_leader->p_flag |= P_ADVLOCK;
679				PROC_UNLOCK(p->p_leader);
680			}
681			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
682			    flp, flg);
683			break;
684		case F_UNLCK:
685			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
686			    flp, flg);
687			break;
688		case F_UNLCKSYS:
689			if (flg != F_REMOTE) {
690				error = EINVAL;
691				break;
692			}
693			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
694			    F_UNLCKSYS, flp, flg);
695			break;
696		default:
697			error = EINVAL;
698			break;
699		}
700		if (error != 0 || flp->l_type == F_UNLCK ||
701		    flp->l_type == F_UNLCKSYS) {
702			fdrop(fp, td);
703			break;
704		}
705
706		/*
707		 * Check for a race with close.
708		 *
709		 * The vnode is now advisory locked (or unlocked, but this case
710		 * is not really important) as the caller requested.
711		 * We had to drop the filedesc lock, so we need to recheck if
712		 * the descriptor is still valid, because if it was closed
713		 * in the meantime we need to remove advisory lock from the
714		 * vnode - close on any descriptor leading to an advisory
715		 * locked vnode, removes that lock.
716		 * We will return 0 on purpose in that case, as the result of
717		 * successful advisory lock might have been externally visible
718		 * already. This is fine - effectively we pretend to the caller
719		 * that the closing thread was a bit slower and that the
720		 * advisory lock succeeded before the close.
721		 */
722		error = fget_unlocked(td, fd, &cap_no_rights, &fp2);
723		if (error != 0) {
724			fdrop(fp, td);
725			break;
726		}
727		if (fp != fp2) {
728			flp->l_whence = SEEK_SET;
729			flp->l_start = 0;
730			flp->l_len = 0;
731			flp->l_type = F_UNLCK;
732			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
733			    F_UNLCK, flp, F_POSIX);
734		}
735		fdrop(fp, td);
736		fdrop(fp2, td);
737		break;
738
739	case F_GETLK:
740		error = fget_unlocked(td, fd, &cap_flock_rights, &fp);
741		if (error != 0)
742			break;
743		if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
744			error = EBADF;
745			fdrop(fp, td);
746			break;
747		}
748		flp = (struct flock *)arg;
749		if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
750		    flp->l_type != F_UNLCK) {
751			error = EINVAL;
752			fdrop(fp, td);
753			break;
754		}
755		if (flp->l_whence == SEEK_CUR) {
756			foffset = foffset_get(fp);
757			if ((flp->l_start > 0 &&
758			    foffset > OFF_MAX - flp->l_start) ||
759			    (flp->l_start < 0 &&
760			    foffset < OFF_MIN - flp->l_start)) {
761				error = EOVERFLOW;
762				fdrop(fp, td);
763				break;
764			}
765			flp->l_start += foffset;
766		}
767		vp = fp->f_vnode;
768		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
769		    F_POSIX);
770		fdrop(fp, td);
771		break;
772
773	case F_ADD_SEALS:
774		error = fget_unlocked(td, fd, &cap_no_rights, &fp);
775		if (error != 0)
776			break;
777		error = fo_add_seals(fp, arg);
778		fdrop(fp, td);
779		break;
780
781	case F_GET_SEALS:
782		error = fget_unlocked(td, fd, &cap_no_rights, &fp);
783		if (error != 0)
784			break;
785		if (fo_get_seals(fp, &seals) == 0)
786			td->td_retval[0] = seals;
787		else
788			error = EINVAL;
789		fdrop(fp, td);
790		break;
791
792	case F_RDAHEAD:
793		arg = arg ? 128 * 1024: 0;
794		/* FALLTHROUGH */
795	case F_READAHEAD:
796		error = fget_unlocked(td, fd, &cap_no_rights, &fp);
797		if (error != 0)
798			break;
799		if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
800			fdrop(fp, td);
801			error = EBADF;
802			break;
803		}
804		vp = fp->f_vnode;
805		if (vp->v_type != VREG) {
806			fdrop(fp, td);
807			error = ENOTTY;
808			break;
809		}
810
811		/*
812		 * Exclusive lock synchronizes against f_seqcount reads and
813		 * writes in sequential_heuristic().
814		 */
815		error = vn_lock(vp, LK_EXCLUSIVE);
816		if (error != 0) {
817			fdrop(fp, td);
818			break;
819		}
820		if (arg >= 0) {
821			bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
822			arg = MIN(arg, INT_MAX - bsize + 1);
823			fp->f_seqcount[UIO_READ] = MIN(IO_SEQMAX,
824			    (arg + bsize - 1) / bsize);
825			atomic_set_int(&fp->f_flag, FRDAHEAD);
826		} else {
827			atomic_clear_int(&fp->f_flag, FRDAHEAD);
828		}
829		VOP_UNLOCK(vp);
830		fdrop(fp, td);
831		break;
832
833	case F_ISUNIONSTACK:
834		/*
835		 * Check if the vnode is part of a union stack (either the
836		 * "union" flag from mount(2) or unionfs).
837		 *
838		 * Prior to introduction of this op libc's readdir would call
839		 * fstatfs(2), in effect unnecessarily copying kilobytes of
840		 * data just to check fs name and a mount flag.
841		 *
842		 * Fixing the code to handle everything in the kernel instead
843		 * is a non-trivial endeavor and has low priority, thus this
844		 * horrible kludge facilitates the current behavior in a much
845		 * cheaper manner until someone(tm) sorts this out.
846		 */
847		error = fget_unlocked(td, fd, &cap_no_rights, &fp);
848		if (error != 0)
849			break;
850		if (fp->f_type != DTYPE_VNODE) {
851			fdrop(fp, td);
852			error = EBADF;
853			break;
854		}
855		vp = fp->f_vnode;
856		/*
857		 * Since we don't prevent dooming the vnode even non-null mp
858		 * found can become immediately stale. This is tolerable since
859		 * mount points are type-stable (providing safe memory access)
860		 * and any vfs op on this vnode going forward will return an
861		 * error (meaning return value in this case is meaningless).
862		 */
863		mp = atomic_load_ptr(&vp->v_mount);
864		if (__predict_false(mp == NULL)) {
865			fdrop(fp, td);
866			error = EBADF;
867			break;
868		}
869		td->td_retval[0] = 0;
870		if (mp->mnt_kern_flag & MNTK_UNIONFS ||
871		    mp->mnt_flag & MNT_UNION)
872			td->td_retval[0] = 1;
873		fdrop(fp, td);
874		break;
875
876	case F_KINFO:
877#ifdef CAPABILITY_MODE
878		if (CAP_TRACING(td))
879			ktrcapfail(CAPFAIL_SYSCALL, &cmd);
880		if (IN_CAPABILITY_MODE(td)) {
881			error = ECAPMODE;
882			break;
883		}
884#endif
885		error = copyin((void *)arg, &kif_sz, sizeof(kif_sz));
886		if (error != 0)
887			break;
888		if (kif_sz != sizeof(*kif)) {
889			error = EINVAL;
890			break;
891		}
892		kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK | M_ZERO);
893		FILEDESC_SLOCK(fdp);
894		error = fget_cap_noref(fdp, fd, &cap_fcntl_rights, &fp, NULL);
895		if (error == 0 && fhold(fp)) {
896			export_file_to_kinfo(fp, fd, NULL, kif, fdp, 0);
897			FILEDESC_SUNLOCK(fdp);
898			fdrop(fp, td);
899			if ((kif->kf_status & KF_ATTR_VALID) != 0) {
900				kif->kf_structsize = sizeof(*kif);
901				error = copyout(kif, (void *)arg, sizeof(*kif));
902			} else {
903				error = EBADF;
904			}
905		} else {
906			FILEDESC_SUNLOCK(fdp);
907			if (error == 0)
908				error = EBADF;
909		}
910		free(kif, M_TEMP);
911		break;
912
913	default:
914		error = EINVAL;
915		break;
916	}
917	return (error);
918}
919
920static int
921getmaxfd(struct thread *td)
922{
923
924	return (min((int)lim_cur(td, RLIMIT_NOFILE), maxfilesperproc));
925}
926
927/*
928 * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
929 */
930int
931kern_dup(struct thread *td, u_int mode, int flags, int old, int new)
932{
933	struct filedesc *fdp;
934	struct filedescent *oldfde, *newfde;
935	struct proc *p;
936	struct file *delfp, *oldfp;
937	u_long *oioctls, *nioctls;
938	int error, maxfd;
939
940	p = td->td_proc;
941	fdp = p->p_fd;
942	oioctls = NULL;
943
944	MPASS((flags & ~(FDDUP_FLAG_CLOEXEC)) == 0);
945	MPASS(mode < FDDUP_LASTMODE);
946
947	AUDIT_ARG_FD(old);
948	/* XXXRW: if (flags & FDDUP_FIXED) AUDIT_ARG_FD2(new); */
949
950	/*
951	 * Verify we have a valid descriptor to dup from and possibly to
952	 * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
953	 * return EINVAL when the new descriptor is out of bounds.
954	 */
955	if (old < 0)
956		return (EBADF);
957	if (new < 0)
958		return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
959	maxfd = getmaxfd(td);
960	if (new >= maxfd)
961		return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
962
963	error = EBADF;
964	FILEDESC_XLOCK(fdp);
965	if (fget_noref(fdp, old) == NULL)
966		goto unlock;
967	if (mode == FDDUP_FIXED && old == new) {
968		td->td_retval[0] = new;
969		if (flags & FDDUP_FLAG_CLOEXEC)
970			fdp->fd_ofiles[new].fde_flags |= UF_EXCLOSE;
971		error = 0;
972		goto unlock;
973	}
974
975	oldfde = &fdp->fd_ofiles[old];
976	oldfp = oldfde->fde_file;
977	if (!fhold(oldfp))
978		goto unlock;
979
980	/*
981	 * If the caller specified a file descriptor, make sure the file
982	 * table is large enough to hold it, and grab it.  Otherwise, just
983	 * allocate a new descriptor the usual way.
984	 */
985	switch (mode) {
986	case FDDUP_NORMAL:
987	case FDDUP_FCNTL:
988		if ((error = fdalloc(td, new, &new)) != 0) {
989			fdrop(oldfp, td);
990			goto unlock;
991		}
992		break;
993	case FDDUP_FIXED:
994		if (new >= fdp->fd_nfiles) {
995			/*
996			 * The resource limits are here instead of e.g.
997			 * fdalloc(), because the file descriptor table may be
998			 * shared between processes, so we can't really use
999			 * racct_add()/racct_sub().  Instead of counting the
1000			 * number of actually allocated descriptors, just put
1001			 * the limit on the size of the file descriptor table.
1002			 */
1003#ifdef RACCT
1004			if (RACCT_ENABLED()) {
1005				error = racct_set_unlocked(p, RACCT_NOFILE, new + 1);
1006				if (error != 0) {
1007					error = EMFILE;
1008					fdrop(oldfp, td);
1009					goto unlock;
1010				}
1011			}
1012#endif
1013			fdgrowtable_exp(fdp, new + 1);
1014		}
1015		if (!fdisused(fdp, new))
1016			fdused(fdp, new);
1017		break;
1018	default:
1019		KASSERT(0, ("%s unsupported mode %d", __func__, mode));
1020	}
1021
1022	KASSERT(old != new, ("new fd is same as old"));
1023
1024	/* Refetch oldfde because the table may have grown and old one freed. */
1025	oldfde = &fdp->fd_ofiles[old];
1026	KASSERT(oldfp == oldfde->fde_file,
1027	    ("fdt_ofiles shift from growth observed at fd %d",
1028	    old));
1029
1030	newfde = &fdp->fd_ofiles[new];
1031	delfp = newfde->fde_file;
1032
1033	nioctls = filecaps_copy_prep(&oldfde->fde_caps);
1034
1035	/*
1036	 * Duplicate the source descriptor.
1037	 */
1038#ifdef CAPABILITIES
1039	seqc_write_begin(&newfde->fde_seqc);
1040#endif
1041	oioctls = filecaps_free_prep(&newfde->fde_caps);
1042	fde_copy(oldfde, newfde);
1043	filecaps_copy_finish(&oldfde->fde_caps, &newfde->fde_caps,
1044	    nioctls);
1045	if ((flags & FDDUP_FLAG_CLOEXEC) != 0)
1046		newfde->fde_flags = oldfde->fde_flags | UF_EXCLOSE;
1047	else
1048		newfde->fde_flags = oldfde->fde_flags & ~UF_EXCLOSE;
1049#ifdef CAPABILITIES
1050	seqc_write_end(&newfde->fde_seqc);
1051#endif
1052	td->td_retval[0] = new;
1053
1054	error = 0;
1055
1056	if (delfp != NULL) {
1057		(void) closefp(fdp, new, delfp, td, true, false);
1058		FILEDESC_UNLOCK_ASSERT(fdp);
1059	} else {
1060unlock:
1061		FILEDESC_XUNLOCK(fdp);
1062	}
1063
1064	filecaps_free_finish(oioctls);
1065	return (error);
1066}
1067
1068static void
1069sigiofree(struct sigio *sigio)
1070{
1071	crfree(sigio->sio_ucred);
1072	free(sigio, M_SIGIO);
1073}
1074
1075static struct sigio *
1076funsetown_locked(struct sigio *sigio)
1077{
1078	struct proc *p;
1079	struct pgrp *pg;
1080
1081	SIGIO_ASSERT_LOCKED();
1082
1083	if (sigio == NULL)
1084		return (NULL);
1085	*sigio->sio_myref = NULL;
1086	if (sigio->sio_pgid < 0) {
1087		pg = sigio->sio_pgrp;
1088		PGRP_LOCK(pg);
1089		SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio, sio_pgsigio);
1090		PGRP_UNLOCK(pg);
1091	} else {
1092		p = sigio->sio_proc;
1093		PROC_LOCK(p);
1094		SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
1095		PROC_UNLOCK(p);
1096	}
1097	return (sigio);
1098}
1099
1100/*
1101 * If sigio is on the list associated with a process or process group,
1102 * disable signalling from the device, remove sigio from the list and
1103 * free sigio.
1104 */
1105void
1106funsetown(struct sigio **sigiop)
1107{
1108	struct sigio *sigio;
1109
1110	/* Racy check, consumers must provide synchronization. */
1111	if (*sigiop == NULL)
1112		return;
1113
1114	SIGIO_LOCK();
1115	sigio = funsetown_locked(*sigiop);
1116	SIGIO_UNLOCK();
1117	if (sigio != NULL)
1118		sigiofree(sigio);
1119}
1120
1121/*
1122 * Free a list of sigio structures.  The caller must ensure that new sigio
1123 * structures cannot be added after this point.  For process groups this is
1124 * guaranteed using the proctree lock; for processes, the P_WEXIT flag serves
1125 * as an interlock.
1126 */
1127void
1128funsetownlst(struct sigiolst *sigiolst)
1129{
1130	struct proc *p;
1131	struct pgrp *pg;
1132	struct sigio *sigio, *tmp;
1133
1134	/* Racy check. */
1135	sigio = SLIST_FIRST(sigiolst);
1136	if (sigio == NULL)
1137		return;
1138
1139	p = NULL;
1140	pg = NULL;
1141
1142	SIGIO_LOCK();
1143	sigio = SLIST_FIRST(sigiolst);
1144	if (sigio == NULL) {
1145		SIGIO_UNLOCK();
1146		return;
1147	}
1148
1149	/*
1150	 * Every entry of the list should belong to a single proc or pgrp.
1151	 */
1152	if (sigio->sio_pgid < 0) {
1153		pg = sigio->sio_pgrp;
1154		sx_assert(&proctree_lock, SX_XLOCKED);
1155		PGRP_LOCK(pg);
1156	} else /* if (sigio->sio_pgid > 0) */ {
1157		p = sigio->sio_proc;
1158		PROC_LOCK(p);
1159		KASSERT((p->p_flag & P_WEXIT) != 0,
1160		    ("%s: process %p is not exiting", __func__, p));
1161	}
1162
1163	SLIST_FOREACH(sigio, sigiolst, sio_pgsigio) {
1164		*sigio->sio_myref = NULL;
1165		if (pg != NULL) {
1166			KASSERT(sigio->sio_pgid < 0,
1167			    ("Proc sigio in pgrp sigio list"));
1168			KASSERT(sigio->sio_pgrp == pg,
1169			    ("Bogus pgrp in sigio list"));
1170		} else /* if (p != NULL) */ {
1171			KASSERT(sigio->sio_pgid > 0,
1172			    ("Pgrp sigio in proc sigio list"));
1173			KASSERT(sigio->sio_proc == p,
1174			    ("Bogus proc in sigio list"));
1175		}
1176	}
1177
1178	if (pg != NULL)
1179		PGRP_UNLOCK(pg);
1180	else
1181		PROC_UNLOCK(p);
1182	SIGIO_UNLOCK();
1183
1184	SLIST_FOREACH_SAFE(sigio, sigiolst, sio_pgsigio, tmp)
1185		sigiofree(sigio);
1186}
1187
1188/*
1189 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
1190 *
1191 * After permission checking, add a sigio structure to the sigio list for
1192 * the process or process group.
1193 */
1194int
1195fsetown(pid_t pgid, struct sigio **sigiop)
1196{
1197	struct proc *proc;
1198	struct pgrp *pgrp;
1199	struct sigio *osigio, *sigio;
1200	int ret;
1201
1202	if (pgid == 0) {
1203		funsetown(sigiop);
1204		return (0);
1205	}
1206
1207	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
1208	sigio->sio_pgid = pgid;
1209	sigio->sio_ucred = crhold(curthread->td_ucred);
1210	sigio->sio_myref = sigiop;
1211
1212	ret = 0;
1213	if (pgid > 0) {
1214		ret = pget(pgid, PGET_NOTWEXIT | PGET_NOTID | PGET_HOLD, &proc);
1215		SIGIO_LOCK();
1216		osigio = funsetown_locked(*sigiop);
1217		if (ret == 0) {
1218			PROC_LOCK(proc);
1219			_PRELE(proc);
1220			if ((proc->p_flag & P_WEXIT) != 0) {
1221				ret = ESRCH;
1222			} else if (proc->p_session !=
1223			    curthread->td_proc->p_session) {
1224				/*
1225				 * Policy - Don't allow a process to FSETOWN a
1226				 * process in another session.
1227				 *
1228				 * Remove this test to allow maximum flexibility
1229				 * or restrict FSETOWN to the current process or
1230				 * process group for maximum safety.
1231				 */
1232				ret = EPERM;
1233			} else {
1234				sigio->sio_proc = proc;
1235				SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio,
1236				    sio_pgsigio);
1237			}
1238			PROC_UNLOCK(proc);
1239		}
1240	} else /* if (pgid < 0) */ {
1241		sx_slock(&proctree_lock);
1242		SIGIO_LOCK();
1243		osigio = funsetown_locked(*sigiop);
1244		pgrp = pgfind(-pgid);
1245		if (pgrp == NULL) {
1246			ret = ESRCH;
1247		} else {
1248			if (pgrp->pg_session != curthread->td_proc->p_session) {
1249				/*
1250				 * Policy - Don't allow a process to FSETOWN a
1251				 * process in another session.
1252				 *
1253				 * Remove this test to allow maximum flexibility
1254				 * or restrict FSETOWN to the current process or
1255				 * process group for maximum safety.
1256				 */
1257				ret = EPERM;
1258			} else {
1259				sigio->sio_pgrp = pgrp;
1260				SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio,
1261				    sio_pgsigio);
1262			}
1263			PGRP_UNLOCK(pgrp);
1264		}
1265		sx_sunlock(&proctree_lock);
1266	}
1267	if (ret == 0)
1268		*sigiop = sigio;
1269	SIGIO_UNLOCK();
1270	if (osigio != NULL)
1271		sigiofree(osigio);
1272	return (ret);
1273}
1274
1275/*
1276 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1277 */
1278pid_t
1279fgetown(struct sigio **sigiop)
1280{
1281	pid_t pgid;
1282
1283	SIGIO_LOCK();
1284	pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
1285	SIGIO_UNLOCK();
1286	return (pgid);
1287}
1288
1289static int
1290closefp_impl(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1291    bool audit)
1292{
1293	int error;
1294
1295	FILEDESC_XLOCK_ASSERT(fdp);
1296
1297	/*
1298	 * We now hold the fp reference that used to be owned by the
1299	 * descriptor array.  We have to unlock the FILEDESC *AFTER*
1300	 * knote_fdclose to prevent a race of the fd getting opened, a knote
1301	 * added, and deleteing a knote for the new fd.
1302	 */
1303	if (__predict_false(!TAILQ_EMPTY(&fdp->fd_kqlist)))
1304		knote_fdclose(td, fd);
1305
1306	/*
1307	 * We need to notify mqueue if the object is of type mqueue.
1308	 */
1309	if (__predict_false(fp->f_type == DTYPE_MQUEUE))
1310		mq_fdclose(td, fd, fp);
1311	FILEDESC_XUNLOCK(fdp);
1312
1313#ifdef AUDIT
1314	if (AUDITING_TD(td) && audit)
1315		audit_sysclose(td, fd, fp);
1316#endif
1317	error = closef(fp, td);
1318
1319	/*
1320	 * All paths leading up to closefp() will have already removed or
1321	 * replaced the fd in the filedesc table, so a restart would not
1322	 * operate on the same file.
1323	 */
1324	if (error == ERESTART)
1325		error = EINTR;
1326
1327	return (error);
1328}
1329
1330static int
1331closefp_hl(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1332    bool holdleaders, bool audit)
1333{
1334	int error;
1335
1336	FILEDESC_XLOCK_ASSERT(fdp);
1337
1338	if (holdleaders) {
1339		if (td->td_proc->p_fdtol != NULL) {
1340			/*
1341			 * Ask fdfree() to sleep to ensure that all relevant
1342			 * process leaders can be traversed in closef().
1343			 */
1344			fdp->fd_holdleaderscount++;
1345		} else {
1346			holdleaders = false;
1347		}
1348	}
1349
1350	error = closefp_impl(fdp, fd, fp, td, audit);
1351	if (holdleaders) {
1352		FILEDESC_XLOCK(fdp);
1353		fdp->fd_holdleaderscount--;
1354		if (fdp->fd_holdleaderscount == 0 &&
1355		    fdp->fd_holdleaderswakeup != 0) {
1356			fdp->fd_holdleaderswakeup = 0;
1357			wakeup(&fdp->fd_holdleaderscount);
1358		}
1359		FILEDESC_XUNLOCK(fdp);
1360	}
1361	return (error);
1362}
1363
1364static int
1365closefp(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1366    bool holdleaders, bool audit)
1367{
1368
1369	FILEDESC_XLOCK_ASSERT(fdp);
1370
1371	if (__predict_false(td->td_proc->p_fdtol != NULL)) {
1372		return (closefp_hl(fdp, fd, fp, td, holdleaders, audit));
1373	} else {
1374		return (closefp_impl(fdp, fd, fp, td, audit));
1375	}
1376}
1377
1378/*
1379 * Close a file descriptor.
1380 */
1381#ifndef _SYS_SYSPROTO_H_
1382struct close_args {
1383	int     fd;
1384};
1385#endif
1386/* ARGSUSED */
1387int
1388sys_close(struct thread *td, struct close_args *uap)
1389{
1390
1391	return (kern_close(td, uap->fd));
1392}
1393
1394int
1395kern_close(struct thread *td, int fd)
1396{
1397	struct filedesc *fdp;
1398	struct file *fp;
1399
1400	fdp = td->td_proc->p_fd;
1401
1402	FILEDESC_XLOCK(fdp);
1403	if ((fp = fget_noref(fdp, fd)) == NULL) {
1404		FILEDESC_XUNLOCK(fdp);
1405		return (EBADF);
1406	}
1407	fdfree(fdp, fd);
1408
1409	/* closefp() drops the FILEDESC lock for us. */
1410	return (closefp(fdp, fd, fp, td, true, true));
1411}
1412
1413static int
1414close_range_cloexec(struct thread *td, u_int lowfd, u_int highfd)
1415{
1416	struct filedesc *fdp;
1417	struct fdescenttbl *fdt;
1418	struct filedescent *fde;
1419	int fd;
1420
1421	fdp = td->td_proc->p_fd;
1422	FILEDESC_XLOCK(fdp);
1423	fdt = atomic_load_ptr(&fdp->fd_files);
1424	highfd = MIN(highfd, fdt->fdt_nfiles - 1);
1425	fd = lowfd;
1426	if (__predict_false(fd > highfd)) {
1427		goto out_locked;
1428	}
1429	for (; fd <= highfd; fd++) {
1430		fde = &fdt->fdt_ofiles[fd];
1431		if (fde->fde_file != NULL)
1432			fde->fde_flags |= UF_EXCLOSE;
1433	}
1434out_locked:
1435	FILEDESC_XUNLOCK(fdp);
1436	return (0);
1437}
1438
1439static int
1440close_range_impl(struct thread *td, u_int lowfd, u_int highfd)
1441{
1442	struct filedesc *fdp;
1443	const struct fdescenttbl *fdt;
1444	struct file *fp;
1445	int fd;
1446
1447	fdp = td->td_proc->p_fd;
1448	FILEDESC_XLOCK(fdp);
1449	fdt = atomic_load_ptr(&fdp->fd_files);
1450	highfd = MIN(highfd, fdt->fdt_nfiles - 1);
1451	fd = lowfd;
1452	if (__predict_false(fd > highfd)) {
1453		goto out_locked;
1454	}
1455	for (;;) {
1456		fp = fdt->fdt_ofiles[fd].fde_file;
1457		if (fp == NULL) {
1458			if (fd == highfd)
1459				goto out_locked;
1460		} else {
1461			fdfree(fdp, fd);
1462			(void) closefp(fdp, fd, fp, td, true, true);
1463			if (fd == highfd)
1464				goto out_unlocked;
1465			FILEDESC_XLOCK(fdp);
1466			fdt = atomic_load_ptr(&fdp->fd_files);
1467		}
1468		fd++;
1469	}
1470out_locked:
1471	FILEDESC_XUNLOCK(fdp);
1472out_unlocked:
1473	return (0);
1474}
1475
1476int
1477kern_close_range(struct thread *td, int flags, u_int lowfd, u_int highfd)
1478{
1479
1480	/*
1481	 * Check this prior to clamping; closefrom(3) with only fd 0, 1, and 2
1482	 * open should not be a usage error.  From a close_range() perspective,
1483	 * close_range(3, ~0U, 0) in the same scenario should also likely not
1484	 * be a usage error as all fd above 3 are in-fact already closed.
1485	 */
1486	if (highfd < lowfd) {
1487		return (EINVAL);
1488	}
1489
1490	if ((flags & CLOSE_RANGE_CLOEXEC) != 0)
1491		return (close_range_cloexec(td, lowfd, highfd));
1492
1493	return (close_range_impl(td, lowfd, highfd));
1494}
1495
1496#ifndef _SYS_SYSPROTO_H_
1497struct close_range_args {
1498	u_int	lowfd;
1499	u_int	highfd;
1500	int	flags;
1501};
1502#endif
1503int
1504sys_close_range(struct thread *td, struct close_range_args *uap)
1505{
1506
1507	AUDIT_ARG_FD(uap->lowfd);
1508	AUDIT_ARG_CMD(uap->highfd);
1509	AUDIT_ARG_FFLAGS(uap->flags);
1510
1511	if ((uap->flags & ~(CLOSE_RANGE_CLOEXEC)) != 0)
1512		return (EINVAL);
1513	return (kern_close_range(td, uap->flags, uap->lowfd, uap->highfd));
1514}
1515
1516#ifdef COMPAT_FREEBSD12
1517/*
1518 * Close open file descriptors.
1519 */
1520#ifndef _SYS_SYSPROTO_H_
1521struct freebsd12_closefrom_args {
1522	int	lowfd;
1523};
1524#endif
1525/* ARGSUSED */
1526int
1527freebsd12_closefrom(struct thread *td, struct freebsd12_closefrom_args *uap)
1528{
1529	u_int lowfd;
1530
1531	AUDIT_ARG_FD(uap->lowfd);
1532
1533	/*
1534	 * Treat negative starting file descriptor values identical to
1535	 * closefrom(0) which closes all files.
1536	 */
1537	lowfd = MAX(0, uap->lowfd);
1538	return (kern_close_range(td, 0, lowfd, ~0U));
1539}
1540#endif	/* COMPAT_FREEBSD12 */
1541
1542#if defined(COMPAT_43)
1543/*
1544 * Return status information about a file descriptor.
1545 */
1546#ifndef _SYS_SYSPROTO_H_
1547struct ofstat_args {
1548	int	fd;
1549	struct	ostat *sb;
1550};
1551#endif
1552/* ARGSUSED */
1553int
1554ofstat(struct thread *td, struct ofstat_args *uap)
1555{
1556	struct ostat oub;
1557	struct stat ub;
1558	int error;
1559
1560	error = kern_fstat(td, uap->fd, &ub);
1561	if (error == 0) {
1562		cvtstat(&ub, &oub);
1563		error = copyout(&oub, uap->sb, sizeof(oub));
1564	}
1565	return (error);
1566}
1567#endif /* COMPAT_43 */
1568
1569#if defined(COMPAT_FREEBSD11)
1570int
1571freebsd11_fstat(struct thread *td, struct freebsd11_fstat_args *uap)
1572{
1573	struct stat sb;
1574	struct freebsd11_stat osb;
1575	int error;
1576
1577	error = kern_fstat(td, uap->fd, &sb);
1578	if (error != 0)
1579		return (error);
1580	error = freebsd11_cvtstat(&sb, &osb);
1581	if (error == 0)
1582		error = copyout(&osb, uap->sb, sizeof(osb));
1583	return (error);
1584}
1585#endif	/* COMPAT_FREEBSD11 */
1586
1587/*
1588 * Return status information about a file descriptor.
1589 */
1590#ifndef _SYS_SYSPROTO_H_
1591struct fstat_args {
1592	int	fd;
1593	struct	stat *sb;
1594};
1595#endif
1596/* ARGSUSED */
1597int
1598sys_fstat(struct thread *td, struct fstat_args *uap)
1599{
1600	struct stat ub;
1601	int error;
1602
1603	error = kern_fstat(td, uap->fd, &ub);
1604	if (error == 0)
1605		error = copyout(&ub, uap->sb, sizeof(ub));
1606	return (error);
1607}
1608
1609int
1610kern_fstat(struct thread *td, int fd, struct stat *sbp)
1611{
1612	struct file *fp;
1613	int error;
1614
1615	AUDIT_ARG_FD(fd);
1616
1617	error = fget(td, fd, &cap_fstat_rights, &fp);
1618	if (__predict_false(error != 0))
1619		return (error);
1620
1621	AUDIT_ARG_FILE(td->td_proc, fp);
1622
1623	error = fo_stat(fp, sbp, td->td_ucred);
1624	fdrop(fp, td);
1625#ifdef __STAT_TIME_T_EXT
1626	sbp->st_atim_ext = 0;
1627	sbp->st_mtim_ext = 0;
1628	sbp->st_ctim_ext = 0;
1629	sbp->st_btim_ext = 0;
1630#endif
1631#ifdef KTRACE
1632	if (KTRPOINT(td, KTR_STRUCT))
1633		ktrstat_error(sbp, error);
1634#endif
1635	return (error);
1636}
1637
1638#if defined(COMPAT_FREEBSD11)
1639/*
1640 * Return status information about a file descriptor.
1641 */
1642#ifndef _SYS_SYSPROTO_H_
1643struct freebsd11_nfstat_args {
1644	int	fd;
1645	struct	nstat *sb;
1646};
1647#endif
1648/* ARGSUSED */
1649int
1650freebsd11_nfstat(struct thread *td, struct freebsd11_nfstat_args *uap)
1651{
1652	struct nstat nub;
1653	struct stat ub;
1654	int error;
1655
1656	error = kern_fstat(td, uap->fd, &ub);
1657	if (error != 0)
1658		return (error);
1659	error = freebsd11_cvtnstat(&ub, &nub);
1660	if (error != 0)
1661		error = copyout(&nub, uap->sb, sizeof(nub));
1662	return (error);
1663}
1664#endif /* COMPAT_FREEBSD11 */
1665
1666/*
1667 * Return pathconf information about a file descriptor.
1668 */
1669#ifndef _SYS_SYSPROTO_H_
1670struct fpathconf_args {
1671	int	fd;
1672	int	name;
1673};
1674#endif
1675/* ARGSUSED */
1676int
1677sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
1678{
1679	long value;
1680	int error;
1681
1682	error = kern_fpathconf(td, uap->fd, uap->name, &value);
1683	if (error == 0)
1684		td->td_retval[0] = value;
1685	return (error);
1686}
1687
1688int
1689kern_fpathconf(struct thread *td, int fd, int name, long *valuep)
1690{
1691	struct file *fp;
1692	struct vnode *vp;
1693	int error;
1694
1695	error = fget(td, fd, &cap_fpathconf_rights, &fp);
1696	if (error != 0)
1697		return (error);
1698
1699	if (name == _PC_ASYNC_IO) {
1700		*valuep = _POSIX_ASYNCHRONOUS_IO;
1701		goto out;
1702	}
1703	vp = fp->f_vnode;
1704	if (vp != NULL) {
1705		vn_lock(vp, LK_SHARED | LK_RETRY);
1706		error = VOP_PATHCONF(vp, name, valuep);
1707		VOP_UNLOCK(vp);
1708	} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1709		if (name != _PC_PIPE_BUF) {
1710			error = EINVAL;
1711		} else {
1712			*valuep = PIPE_BUF;
1713			error = 0;
1714		}
1715	} else {
1716		error = EOPNOTSUPP;
1717	}
1718out:
1719	fdrop(fp, td);
1720	return (error);
1721}
1722
1723/*
1724 * Copy filecaps structure allocating memory for ioctls array if needed.
1725 *
1726 * The last parameter indicates whether the fdtable is locked. If it is not and
1727 * ioctls are encountered, copying fails and the caller must lock the table.
1728 *
1729 * Note that if the table was not locked, the caller has to check the relevant
1730 * sequence counter to determine whether the operation was successful.
1731 */
1732bool
1733filecaps_copy(const struct filecaps *src, struct filecaps *dst, bool locked)
1734{
1735	size_t size;
1736
1737	if (src->fc_ioctls != NULL && !locked)
1738		return (false);
1739	memcpy(dst, src, sizeof(*src));
1740	if (src->fc_ioctls == NULL)
1741		return (true);
1742
1743	KASSERT(src->fc_nioctls > 0,
1744	    ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
1745
1746	size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1747	dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK);
1748	memcpy(dst->fc_ioctls, src->fc_ioctls, size);
1749	return (true);
1750}
1751
1752static u_long *
1753filecaps_copy_prep(const struct filecaps *src)
1754{
1755	u_long *ioctls;
1756	size_t size;
1757
1758	if (__predict_true(src->fc_ioctls == NULL))
1759		return (NULL);
1760
1761	KASSERT(src->fc_nioctls > 0,
1762	    ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
1763
1764	size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1765	ioctls = malloc(size, M_FILECAPS, M_WAITOK);
1766	return (ioctls);
1767}
1768
1769static void
1770filecaps_copy_finish(const struct filecaps *src, struct filecaps *dst,
1771    u_long *ioctls)
1772{
1773	size_t size;
1774
1775	*dst = *src;
1776	if (__predict_true(src->fc_ioctls == NULL)) {
1777		MPASS(ioctls == NULL);
1778		return;
1779	}
1780
1781	size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1782	dst->fc_ioctls = ioctls;
1783	bcopy(src->fc_ioctls, dst->fc_ioctls, size);
1784}
1785
1786/*
1787 * Move filecaps structure to the new place and clear the old place.
1788 */
1789void
1790filecaps_move(struct filecaps *src, struct filecaps *dst)
1791{
1792
1793	*dst = *src;
1794	bzero(src, sizeof(*src));
1795}
1796
1797/*
1798 * Fill the given filecaps structure with full rights.
1799 */
1800static void
1801filecaps_fill(struct filecaps *fcaps)
1802{
1803
1804	CAP_ALL(&fcaps->fc_rights);
1805	fcaps->fc_ioctls = NULL;
1806	fcaps->fc_nioctls = -1;
1807	fcaps->fc_fcntls = CAP_FCNTL_ALL;
1808}
1809
1810/*
1811 * Free memory allocated within filecaps structure.
1812 */
1813static void
1814filecaps_free_ioctl(struct filecaps *fcaps)
1815{
1816
1817	free(fcaps->fc_ioctls, M_FILECAPS);
1818	fcaps->fc_ioctls = NULL;
1819}
1820
1821void
1822filecaps_free(struct filecaps *fcaps)
1823{
1824
1825	filecaps_free_ioctl(fcaps);
1826	bzero(fcaps, sizeof(*fcaps));
1827}
1828
1829static u_long *
1830filecaps_free_prep(struct filecaps *fcaps)
1831{
1832	u_long *ioctls;
1833
1834	ioctls = fcaps->fc_ioctls;
1835	bzero(fcaps, sizeof(*fcaps));
1836	return (ioctls);
1837}
1838
1839static void
1840filecaps_free_finish(u_long *ioctls)
1841{
1842
1843	free(ioctls, M_FILECAPS);
1844}
1845
1846/*
1847 * Validate the given filecaps structure.
1848 */
1849static void
1850filecaps_validate(const struct filecaps *fcaps, const char *func)
1851{
1852
1853	KASSERT(cap_rights_is_valid(&fcaps->fc_rights),
1854	    ("%s: invalid rights", func));
1855	KASSERT((fcaps->fc_fcntls & ~CAP_FCNTL_ALL) == 0,
1856	    ("%s: invalid fcntls", func));
1857	KASSERT(fcaps->fc_fcntls == 0 ||
1858	    cap_rights_is_set(&fcaps->fc_rights, CAP_FCNTL),
1859	    ("%s: fcntls without CAP_FCNTL", func));
1860	/*
1861	 * open calls without WANTIOCTLCAPS free caps but leave the counter
1862	 */
1863#if 0
1864	KASSERT(fcaps->fc_ioctls != NULL ? fcaps->fc_nioctls > 0 :
1865	    (fcaps->fc_nioctls == -1 || fcaps->fc_nioctls == 0),
1866	    ("%s: invalid ioctls", func));
1867#endif
1868	KASSERT(fcaps->fc_nioctls == 0 ||
1869	    cap_rights_is_set(&fcaps->fc_rights, CAP_IOCTL),
1870	    ("%s: ioctls without CAP_IOCTL", func));
1871}
1872
1873static void
1874fdgrowtable_exp(struct filedesc *fdp, int nfd)
1875{
1876	int nfd1;
1877
1878	FILEDESC_XLOCK_ASSERT(fdp);
1879
1880	nfd1 = fdp->fd_nfiles * 2;
1881	if (nfd1 < nfd)
1882		nfd1 = nfd;
1883	fdgrowtable(fdp, nfd1);
1884}
1885
1886/*
1887 * Grow the file table to accommodate (at least) nfd descriptors.
1888 */
1889static void
1890fdgrowtable(struct filedesc *fdp, int nfd)
1891{
1892	struct filedesc0 *fdp0;
1893	struct freetable *ft;
1894	struct fdescenttbl *ntable;
1895	struct fdescenttbl *otable;
1896	int nnfiles, onfiles;
1897	NDSLOTTYPE *nmap, *omap;
1898
1899	KASSERT(fdp->fd_nfiles > 0, ("zero-length file table"));
1900
1901	/* save old values */
1902	onfiles = fdp->fd_nfiles;
1903	otable = fdp->fd_files;
1904	omap = fdp->fd_map;
1905
1906	/* compute the size of the new table */
1907	nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
1908	if (nnfiles <= onfiles)
1909		/* the table is already large enough */
1910		return;
1911
1912	/*
1913	 * Allocate a new table.  We need enough space for the number of
1914	 * entries, file entries themselves and the struct freetable we will use
1915	 * when we decommission the table and place it on the freelist.
1916	 * We place the struct freetable in the middle so we don't have
1917	 * to worry about padding.
1918	 */
1919	ntable = malloc(offsetof(struct fdescenttbl, fdt_ofiles) +
1920	    nnfiles * sizeof(ntable->fdt_ofiles[0]) +
1921	    sizeof(struct freetable),
1922	    M_FILEDESC, M_ZERO | M_WAITOK);
1923	/* copy the old data */
1924	ntable->fdt_nfiles = nnfiles;
1925	memcpy(ntable->fdt_ofiles, otable->fdt_ofiles,
1926	    onfiles * sizeof(ntable->fdt_ofiles[0]));
1927
1928	/*
1929	 * Allocate a new map only if the old is not large enough.  It will
1930	 * grow at a slower rate than the table as it can map more
1931	 * entries than the table can hold.
1932	 */
1933	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
1934		nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE, M_FILEDESC,
1935		    M_ZERO | M_WAITOK);
1936		/* copy over the old data and update the pointer */
1937		memcpy(nmap, omap, NDSLOTS(onfiles) * sizeof(*omap));
1938		fdp->fd_map = nmap;
1939	}
1940
1941	/*
1942	 * Make sure that ntable is correctly initialized before we replace
1943	 * fd_files poiner. Otherwise fget_unlocked() may see inconsistent
1944	 * data.
1945	 */
1946	atomic_store_rel_ptr((volatile void *)&fdp->fd_files, (uintptr_t)ntable);
1947
1948	/*
1949	 * Free the old file table when not shared by other threads or processes.
1950	 * The old file table is considered to be shared when either are true:
1951	 * - The process has more than one thread.
1952	 * - The file descriptor table has been shared via fdshare().
1953	 *
1954	 * When shared, the old file table will be placed on a freelist
1955	 * which will be processed when the struct filedesc is released.
1956	 *
1957	 * Note that if onfiles == NDFILE, we're dealing with the original
1958	 * static allocation contained within (struct filedesc0 *)fdp,
1959	 * which must not be freed.
1960	 */
1961	if (onfiles > NDFILE) {
1962		/*
1963		 * Note we may be called here from fdinit while allocating a
1964		 * table for a new process in which case ->p_fd points
1965		 * elsewhere.
1966		 */
1967		if (curproc->p_fd != fdp || FILEDESC_IS_ONLY_USER(fdp)) {
1968			free(otable, M_FILEDESC);
1969		} else {
1970			ft = (struct freetable *)&otable->fdt_ofiles[onfiles];
1971			fdp0 = (struct filedesc0 *)fdp;
1972			ft->ft_table = otable;
1973			SLIST_INSERT_HEAD(&fdp0->fd_free, ft, ft_next);
1974		}
1975	}
1976	/*
1977	 * The map does not have the same possibility of threads still
1978	 * holding references to it.  So always free it as long as it
1979	 * does not reference the original static allocation.
1980	 */
1981	if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
1982		free(omap, M_FILEDESC);
1983}
1984
1985/*
1986 * Allocate a file descriptor for the process.
1987 */
1988int
1989fdalloc(struct thread *td, int minfd, int *result)
1990{
1991	struct proc *p = td->td_proc;
1992	struct filedesc *fdp = p->p_fd;
1993	int fd, maxfd, allocfd;
1994#ifdef RACCT
1995	int error;
1996#endif
1997
1998	FILEDESC_XLOCK_ASSERT(fdp);
1999
2000	if (fdp->fd_freefile > minfd)
2001		minfd = fdp->fd_freefile;
2002
2003	maxfd = getmaxfd(td);
2004
2005	/*
2006	 * Search the bitmap for a free descriptor starting at minfd.
2007	 * If none is found, grow the file table.
2008	 */
2009	fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
2010	if (__predict_false(fd >= maxfd))
2011		return (EMFILE);
2012	if (__predict_false(fd >= fdp->fd_nfiles)) {
2013		allocfd = min(fd * 2, maxfd);
2014#ifdef RACCT
2015		if (RACCT_ENABLED()) {
2016			error = racct_set_unlocked(p, RACCT_NOFILE, allocfd);
2017			if (error != 0)
2018				return (EMFILE);
2019		}
2020#endif
2021		/*
2022		 * fd is already equal to first free descriptor >= minfd, so
2023		 * we only need to grow the table and we are done.
2024		 */
2025		fdgrowtable_exp(fdp, allocfd);
2026	}
2027
2028	/*
2029	 * Perform some sanity checks, then mark the file descriptor as
2030	 * used and return it to the caller.
2031	 */
2032	KASSERT(fd >= 0 && fd < min(maxfd, fdp->fd_nfiles),
2033	    ("invalid descriptor %d", fd));
2034	KASSERT(!fdisused(fdp, fd),
2035	    ("fd_first_free() returned non-free descriptor"));
2036	KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
2037	    ("file descriptor isn't free"));
2038	fdused(fdp, fd);
2039	*result = fd;
2040	return (0);
2041}
2042
2043/*
2044 * Allocate n file descriptors for the process.
2045 */
2046int
2047fdallocn(struct thread *td, int minfd, int *fds, int n)
2048{
2049	struct proc *p = td->td_proc;
2050	struct filedesc *fdp = p->p_fd;
2051	int i;
2052
2053	FILEDESC_XLOCK_ASSERT(fdp);
2054
2055	for (i = 0; i < n; i++)
2056		if (fdalloc(td, 0, &fds[i]) != 0)
2057			break;
2058
2059	if (i < n) {
2060		for (i--; i >= 0; i--)
2061			fdunused(fdp, fds[i]);
2062		return (EMFILE);
2063	}
2064
2065	return (0);
2066}
2067
2068/*
2069 * Create a new open file structure and allocate a file descriptor for the
2070 * process that refers to it.  We add one reference to the file for the
2071 * descriptor table and one reference for resultfp. This is to prevent us
2072 * being preempted and the entry in the descriptor table closed after we
2073 * release the FILEDESC lock.
2074 */
2075int
2076falloc_caps(struct thread *td, struct file **resultfp, int *resultfd, int flags,
2077    struct filecaps *fcaps)
2078{
2079	struct file *fp;
2080	int error, fd;
2081
2082	MPASS(resultfp != NULL);
2083	MPASS(resultfd != NULL);
2084
2085	error = _falloc_noinstall(td, &fp, 2);
2086	if (__predict_false(error != 0)) {
2087		return (error);
2088	}
2089
2090	error = finstall_refed(td, fp, &fd, flags, fcaps);
2091	if (__predict_false(error != 0)) {
2092		falloc_abort(td, fp);
2093		return (error);
2094	}
2095
2096	*resultfp = fp;
2097	*resultfd = fd;
2098
2099	return (0);
2100}
2101
2102/*
2103 * Create a new open file structure without allocating a file descriptor.
2104 */
2105int
2106_falloc_noinstall(struct thread *td, struct file **resultfp, u_int n)
2107{
2108	struct file *fp;
2109	int maxuserfiles = maxfiles - (maxfiles / 20);
2110	int openfiles_new;
2111	static struct timeval lastfail;
2112	static int curfail;
2113
2114	KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
2115	MPASS(n > 0);
2116
2117	openfiles_new = atomic_fetchadd_int(&openfiles, 1) + 1;
2118	if ((openfiles_new >= maxuserfiles &&
2119	    priv_check(td, PRIV_MAXFILES) != 0) ||
2120	    openfiles_new >= maxfiles) {
2121		atomic_subtract_int(&openfiles, 1);
2122		if (ppsratecheck(&lastfail, &curfail, 1)) {
2123			printf("kern.maxfiles limit exceeded by uid %i, (%s) "
2124			    "please see tuning(7).\n", td->td_ucred->cr_ruid, td->td_proc->p_comm);
2125		}
2126		return (ENFILE);
2127	}
2128	fp = uma_zalloc(file_zone, M_WAITOK);
2129	bzero(fp, sizeof(*fp));
2130	refcount_init(&fp->f_count, n);
2131	fp->f_cred = crhold(td->td_ucred);
2132	fp->f_ops = &badfileops;
2133	*resultfp = fp;
2134	return (0);
2135}
2136
2137void
2138falloc_abort(struct thread *td, struct file *fp)
2139{
2140
2141	/*
2142	 * For assertion purposes.
2143	 */
2144	refcount_init(&fp->f_count, 0);
2145	_fdrop(fp, td);
2146}
2147
2148/*
2149 * Install a file in a file descriptor table.
2150 */
2151void
2152_finstall(struct filedesc *fdp, struct file *fp, int fd, int flags,
2153    struct filecaps *fcaps)
2154{
2155	struct filedescent *fde;
2156
2157	MPASS(fp != NULL);
2158	if (fcaps != NULL)
2159		filecaps_validate(fcaps, __func__);
2160	FILEDESC_XLOCK_ASSERT(fdp);
2161
2162	fde = &fdp->fd_ofiles[fd];
2163#ifdef CAPABILITIES
2164	seqc_write_begin(&fde->fde_seqc);
2165#endif
2166	fde->fde_file = fp;
2167	fde->fde_flags = (flags & O_CLOEXEC) != 0 ? UF_EXCLOSE : 0;
2168	if (fcaps != NULL)
2169		filecaps_move(fcaps, &fde->fde_caps);
2170	else
2171		filecaps_fill(&fde->fde_caps);
2172#ifdef CAPABILITIES
2173	seqc_write_end(&fde->fde_seqc);
2174#endif
2175}
2176
2177int
2178finstall_refed(struct thread *td, struct file *fp, int *fd, int flags,
2179    struct filecaps *fcaps)
2180{
2181	struct filedesc *fdp = td->td_proc->p_fd;
2182	int error;
2183
2184	MPASS(fd != NULL);
2185
2186	FILEDESC_XLOCK(fdp);
2187	error = fdalloc(td, 0, fd);
2188	if (__predict_true(error == 0)) {
2189		_finstall(fdp, fp, *fd, flags, fcaps);
2190	}
2191	FILEDESC_XUNLOCK(fdp);
2192	return (error);
2193}
2194
2195int
2196finstall(struct thread *td, struct file *fp, int *fd, int flags,
2197    struct filecaps *fcaps)
2198{
2199	int error;
2200
2201	MPASS(fd != NULL);
2202
2203	if (!fhold(fp))
2204		return (EBADF);
2205	error = finstall_refed(td, fp, fd, flags, fcaps);
2206	if (__predict_false(error != 0)) {
2207		fdrop(fp, td);
2208	}
2209	return (error);
2210}
2211
2212/*
2213 * Build a new filedesc structure from another.
2214 *
2215 * If fdp is not NULL, return with it shared locked.
2216 */
2217struct filedesc *
2218fdinit(void)
2219{
2220	struct filedesc0 *newfdp0;
2221	struct filedesc *newfdp;
2222
2223	newfdp0 = uma_zalloc(filedesc0_zone, M_WAITOK | M_ZERO);
2224	newfdp = &newfdp0->fd_fd;
2225
2226	/* Create the file descriptor table. */
2227	FILEDESC_LOCK_INIT(newfdp);
2228	refcount_init(&newfdp->fd_refcnt, 1);
2229	refcount_init(&newfdp->fd_holdcnt, 1);
2230	newfdp->fd_map = newfdp0->fd_dmap;
2231	newfdp->fd_files = (struct fdescenttbl *)&newfdp0->fd_dfiles;
2232	newfdp->fd_files->fdt_nfiles = NDFILE;
2233
2234	return (newfdp);
2235}
2236
2237/*
2238 * Build a pwddesc structure from another.
2239 * Copy the current, root, and jail root vnode references.
2240 *
2241 * If pdp is not NULL and keeplock is true, return with it (exclusively) locked.
2242 */
2243struct pwddesc *
2244pdinit(struct pwddesc *pdp, bool keeplock)
2245{
2246	struct pwddesc *newpdp;
2247	struct pwd *newpwd;
2248
2249	newpdp = malloc(sizeof(*newpdp), M_PWDDESC, M_WAITOK | M_ZERO);
2250
2251	PWDDESC_LOCK_INIT(newpdp);
2252	refcount_init(&newpdp->pd_refcount, 1);
2253	newpdp->pd_cmask = CMASK;
2254
2255	if (pdp == NULL) {
2256		newpwd = pwd_alloc();
2257		smr_serialized_store(&newpdp->pd_pwd, newpwd, true);
2258		return (newpdp);
2259	}
2260
2261	PWDDESC_XLOCK(pdp);
2262	newpwd = pwd_hold_pwddesc(pdp);
2263	smr_serialized_store(&newpdp->pd_pwd, newpwd, true);
2264	if (!keeplock)
2265		PWDDESC_XUNLOCK(pdp);
2266	return (newpdp);
2267}
2268
2269/*
2270 * Hold either filedesc or pwddesc of the passed process.
2271 *
2272 * The process lock is used to synchronize against the target exiting and
2273 * freeing the data.
2274 *
2275 * Clearing can be ilustrated in 3 steps:
2276 * 1. set the pointer to NULL. Either routine can race against it, hence
2277 *   atomic_load_ptr.
2278 * 2. observe the process lock as not taken. Until then fdhold/pdhold can
2279 *   race to either still see the pointer or find NULL. It is still safe to
2280 *   grab a reference as clearing is stalled.
2281 * 3. after the lock is observed as not taken, any fdhold/pdhold calls are
2282 *   guaranteed to see NULL, making it safe to finish clearing
2283 */
2284static struct filedesc *
2285fdhold(struct proc *p)
2286{
2287	struct filedesc *fdp;
2288
2289	PROC_LOCK_ASSERT(p, MA_OWNED);
2290	fdp = atomic_load_ptr(&p->p_fd);
2291	if (fdp != NULL)
2292		refcount_acquire(&fdp->fd_holdcnt);
2293	return (fdp);
2294}
2295
2296static struct pwddesc *
2297pdhold(struct proc *p)
2298{
2299	struct pwddesc *pdp;
2300
2301	PROC_LOCK_ASSERT(p, MA_OWNED);
2302	pdp = atomic_load_ptr(&p->p_pd);
2303	if (pdp != NULL)
2304		refcount_acquire(&pdp->pd_refcount);
2305	return (pdp);
2306}
2307
2308static void
2309fddrop(struct filedesc *fdp)
2310{
2311
2312	if (refcount_load(&fdp->fd_holdcnt) > 1) {
2313		if (refcount_release(&fdp->fd_holdcnt) == 0)
2314			return;
2315	}
2316
2317	FILEDESC_LOCK_DESTROY(fdp);
2318	uma_zfree(filedesc0_zone, fdp);
2319}
2320
2321static void
2322pddrop(struct pwddesc *pdp)
2323{
2324	struct pwd *pwd;
2325
2326	if (refcount_release_if_not_last(&pdp->pd_refcount))
2327		return;
2328
2329	PWDDESC_XLOCK(pdp);
2330	if (refcount_release(&pdp->pd_refcount) == 0) {
2331		PWDDESC_XUNLOCK(pdp);
2332		return;
2333	}
2334	pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
2335	pwd_set(pdp, NULL);
2336	PWDDESC_XUNLOCK(pdp);
2337	pwd_drop(pwd);
2338
2339	PWDDESC_LOCK_DESTROY(pdp);
2340	free(pdp, M_PWDDESC);
2341}
2342
2343/*
2344 * Share a filedesc structure.
2345 */
2346struct filedesc *
2347fdshare(struct filedesc *fdp)
2348{
2349
2350	refcount_acquire(&fdp->fd_refcnt);
2351	return (fdp);
2352}
2353
2354/*
2355 * Share a pwddesc structure.
2356 */
2357struct pwddesc *
2358pdshare(struct pwddesc *pdp)
2359{
2360	refcount_acquire(&pdp->pd_refcount);
2361	return (pdp);
2362}
2363
2364/*
2365 * Unshare a filedesc structure, if necessary by making a copy
2366 */
2367void
2368fdunshare(struct thread *td)
2369{
2370	struct filedesc *tmp;
2371	struct proc *p = td->td_proc;
2372
2373	if (refcount_load(&p->p_fd->fd_refcnt) == 1)
2374		return;
2375
2376	tmp = fdcopy(p->p_fd);
2377	fdescfree(td);
2378	p->p_fd = tmp;
2379}
2380
2381/*
2382 * Unshare a pwddesc structure.
2383 */
2384void
2385pdunshare(struct thread *td)
2386{
2387	struct pwddesc *pdp;
2388	struct proc *p;
2389
2390	p = td->td_proc;
2391	/* Not shared. */
2392	if (refcount_load(&p->p_pd->pd_refcount) == 1)
2393		return;
2394
2395	pdp = pdcopy(p->p_pd);
2396	pdescfree(td);
2397	p->p_pd = pdp;
2398}
2399
2400/*
2401 * Copy a filedesc structure.  A NULL pointer in returns a NULL reference,
2402 * this is to ease callers, not catch errors.
2403 */
2404struct filedesc *
2405fdcopy(struct filedesc *fdp)
2406{
2407	struct filedesc *newfdp;
2408	struct filedescent *nfde, *ofde;
2409	int i, lastfile;
2410
2411	MPASS(fdp != NULL);
2412
2413	newfdp = fdinit();
2414	FILEDESC_SLOCK(fdp);
2415	for (;;) {
2416		lastfile = fdlastfile(fdp);
2417		if (lastfile < newfdp->fd_nfiles)
2418			break;
2419		FILEDESC_SUNLOCK(fdp);
2420		fdgrowtable(newfdp, lastfile + 1);
2421		FILEDESC_SLOCK(fdp);
2422	}
2423	/* copy all passable descriptors (i.e. not kqueue) */
2424	newfdp->fd_freefile = fdp->fd_freefile;
2425	FILEDESC_FOREACH_FDE(fdp, i, ofde) {
2426		if ((ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE) == 0 ||
2427		    !fhold(ofde->fde_file)) {
2428			if (newfdp->fd_freefile == fdp->fd_freefile)
2429				newfdp->fd_freefile = i;
2430			continue;
2431		}
2432		nfde = &newfdp->fd_ofiles[i];
2433		*nfde = *ofde;
2434		filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true);
2435		fdused_init(newfdp, i);
2436	}
2437	MPASS(newfdp->fd_freefile != -1);
2438	FILEDESC_SUNLOCK(fdp);
2439	return (newfdp);
2440}
2441
2442/*
2443 * Copy a pwddesc structure.
2444 */
2445struct pwddesc *
2446pdcopy(struct pwddesc *pdp)
2447{
2448	struct pwddesc *newpdp;
2449
2450	MPASS(pdp != NULL);
2451
2452	newpdp = pdinit(pdp, true);
2453	newpdp->pd_cmask = pdp->pd_cmask;
2454	PWDDESC_XUNLOCK(pdp);
2455	return (newpdp);
2456}
2457
2458/*
2459 * Clear POSIX style locks. This is only used when fdp looses a reference (i.e.
2460 * one of processes using it exits) and the table used to be shared.
2461 */
2462static void
2463fdclearlocks(struct thread *td)
2464{
2465	struct filedesc *fdp;
2466	struct filedesc_to_leader *fdtol;
2467	struct flock lf;
2468	struct file *fp;
2469	struct proc *p;
2470	struct vnode *vp;
2471	int i;
2472
2473	p = td->td_proc;
2474	fdp = p->p_fd;
2475	fdtol = p->p_fdtol;
2476	MPASS(fdtol != NULL);
2477
2478	FILEDESC_XLOCK(fdp);
2479	KASSERT(fdtol->fdl_refcount > 0,
2480	    ("filedesc_to_refcount botch: fdl_refcount=%d",
2481	    fdtol->fdl_refcount));
2482	if (fdtol->fdl_refcount == 1 &&
2483	    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
2484		FILEDESC_FOREACH_FP(fdp, i, fp) {
2485			if (fp->f_type != DTYPE_VNODE ||
2486			    !fhold(fp))
2487				continue;
2488			FILEDESC_XUNLOCK(fdp);
2489			lf.l_whence = SEEK_SET;
2490			lf.l_start = 0;
2491			lf.l_len = 0;
2492			lf.l_type = F_UNLCK;
2493			vp = fp->f_vnode;
2494			(void) VOP_ADVLOCK(vp,
2495			    (caddr_t)p->p_leader, F_UNLCK,
2496			    &lf, F_POSIX);
2497			FILEDESC_XLOCK(fdp);
2498			fdrop(fp, td);
2499		}
2500	}
2501retry:
2502	if (fdtol->fdl_refcount == 1) {
2503		if (fdp->fd_holdleaderscount > 0 &&
2504		    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
2505			/*
2506			 * close() or kern_dup() has cleared a reference
2507			 * in a shared file descriptor table.
2508			 */
2509			fdp->fd_holdleaderswakeup = 1;
2510			sx_sleep(&fdp->fd_holdleaderscount,
2511			    FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
2512			goto retry;
2513		}
2514		if (fdtol->fdl_holdcount > 0) {
2515			/*
2516			 * Ensure that fdtol->fdl_leader remains
2517			 * valid in closef().
2518			 */
2519			fdtol->fdl_wakeup = 1;
2520			sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
2521			    "fdlhold", 0);
2522			goto retry;
2523		}
2524	}
2525	fdtol->fdl_refcount--;
2526	if (fdtol->fdl_refcount == 0 &&
2527	    fdtol->fdl_holdcount == 0) {
2528		fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
2529		fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
2530	} else
2531		fdtol = NULL;
2532	p->p_fdtol = NULL;
2533	FILEDESC_XUNLOCK(fdp);
2534	if (fdtol != NULL)
2535		free(fdtol, M_FILEDESC_TO_LEADER);
2536}
2537
2538/*
2539 * Release a filedesc structure.
2540 */
2541static void
2542fdescfree_fds(struct thread *td, struct filedesc *fdp)
2543{
2544	struct filedesc0 *fdp0;
2545	struct freetable *ft, *tft;
2546	struct filedescent *fde;
2547	struct file *fp;
2548	int i;
2549
2550	KASSERT(refcount_load(&fdp->fd_refcnt) == 0,
2551	    ("%s: fd table %p carries references", __func__, fdp));
2552
2553	/*
2554	 * Serialize with threads iterating over the table, if any.
2555	 */
2556	if (refcount_load(&fdp->fd_holdcnt) > 1) {
2557		FILEDESC_XLOCK(fdp);
2558		FILEDESC_XUNLOCK(fdp);
2559	}
2560
2561	FILEDESC_FOREACH_FDE(fdp, i, fde) {
2562		fp = fde->fde_file;
2563		fdefree_last(fde);
2564		(void) closef(fp, td);
2565	}
2566
2567	if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
2568		free(fdp->fd_map, M_FILEDESC);
2569	if (fdp->fd_nfiles > NDFILE)
2570		free(fdp->fd_files, M_FILEDESC);
2571
2572	fdp0 = (struct filedesc0 *)fdp;
2573	SLIST_FOREACH_SAFE(ft, &fdp0->fd_free, ft_next, tft)
2574		free(ft->ft_table, M_FILEDESC);
2575
2576	fddrop(fdp);
2577}
2578
2579void
2580fdescfree(struct thread *td)
2581{
2582	struct proc *p;
2583	struct filedesc *fdp;
2584
2585	p = td->td_proc;
2586	fdp = p->p_fd;
2587	MPASS(fdp != NULL);
2588
2589#ifdef RACCT
2590	if (RACCT_ENABLED())
2591		racct_set_unlocked(p, RACCT_NOFILE, 0);
2592#endif
2593
2594	if (p->p_fdtol != NULL)
2595		fdclearlocks(td);
2596
2597	/*
2598	 * Check fdhold for an explanation.
2599	 */
2600	atomic_store_ptr(&p->p_fd, NULL);
2601	atomic_thread_fence_seq_cst();
2602	PROC_WAIT_UNLOCKED(p);
2603
2604	if (refcount_release(&fdp->fd_refcnt) == 0)
2605		return;
2606
2607	fdescfree_fds(td, fdp);
2608}
2609
2610void
2611pdescfree(struct thread *td)
2612{
2613	struct proc *p;
2614	struct pwddesc *pdp;
2615
2616	p = td->td_proc;
2617	pdp = p->p_pd;
2618	MPASS(pdp != NULL);
2619
2620	/*
2621	 * Check pdhold for an explanation.
2622	 */
2623	atomic_store_ptr(&p->p_pd, NULL);
2624	atomic_thread_fence_seq_cst();
2625	PROC_WAIT_UNLOCKED(p);
2626
2627	pddrop(pdp);
2628}
2629
2630/*
2631 * For setugid programs, we don't want to people to use that setugidness
2632 * to generate error messages which write to a file which otherwise would
2633 * otherwise be off-limits to the process.  We check for filesystems where
2634 * the vnode can change out from under us after execve (like [lin]procfs).
2635 *
2636 * Since fdsetugidsafety calls this only for fd 0, 1 and 2, this check is
2637 * sufficient.  We also don't check for setugidness since we know we are.
2638 */
2639static bool
2640is_unsafe(struct file *fp)
2641{
2642	struct vnode *vp;
2643
2644	if (fp->f_type != DTYPE_VNODE)
2645		return (false);
2646
2647	vp = fp->f_vnode;
2648	return ((vp->v_vflag & VV_PROCDEP) != 0);
2649}
2650
2651/*
2652 * Make this setguid thing safe, if at all possible.
2653 */
2654void
2655fdsetugidsafety(struct thread *td)
2656{
2657	struct filedesc *fdp;
2658	struct file *fp;
2659	int i;
2660
2661	fdp = td->td_proc->p_fd;
2662	KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
2663	    ("the fdtable should not be shared"));
2664	MPASS(fdp->fd_nfiles >= 3);
2665	for (i = 0; i <= 2; i++) {
2666		fp = fdp->fd_ofiles[i].fde_file;
2667		if (fp != NULL && is_unsafe(fp)) {
2668			FILEDESC_XLOCK(fdp);
2669			knote_fdclose(td, i);
2670			/*
2671			 * NULL-out descriptor prior to close to avoid
2672			 * a race while close blocks.
2673			 */
2674			fdfree(fdp, i);
2675			FILEDESC_XUNLOCK(fdp);
2676			(void) closef(fp, td);
2677		}
2678	}
2679}
2680
2681/*
2682 * If a specific file object occupies a specific file descriptor, close the
2683 * file descriptor entry and drop a reference on the file object.  This is a
2684 * convenience function to handle a subsequent error in a function that calls
2685 * falloc() that handles the race that another thread might have closed the
2686 * file descriptor out from under the thread creating the file object.
2687 */
2688void
2689fdclose(struct thread *td, struct file *fp, int idx)
2690{
2691	struct filedesc *fdp = td->td_proc->p_fd;
2692
2693	FILEDESC_XLOCK(fdp);
2694	if (fdp->fd_ofiles[idx].fde_file == fp) {
2695		fdfree(fdp, idx);
2696		FILEDESC_XUNLOCK(fdp);
2697		fdrop(fp, td);
2698	} else
2699		FILEDESC_XUNLOCK(fdp);
2700}
2701
2702/*
2703 * Close any files on exec?
2704 */
2705void
2706fdcloseexec(struct thread *td)
2707{
2708	struct filedesc *fdp;
2709	struct filedescent *fde;
2710	struct file *fp;
2711	int i;
2712
2713	fdp = td->td_proc->p_fd;
2714	KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
2715	    ("the fdtable should not be shared"));
2716	FILEDESC_FOREACH_FDE(fdp, i, fde) {
2717		fp = fde->fde_file;
2718		if (fp->f_type == DTYPE_MQUEUE ||
2719		    (fde->fde_flags & UF_EXCLOSE)) {
2720			FILEDESC_XLOCK(fdp);
2721			fdfree(fdp, i);
2722			(void) closefp(fdp, i, fp, td, false, false);
2723			FILEDESC_UNLOCK_ASSERT(fdp);
2724		}
2725	}
2726}
2727
2728/*
2729 * It is unsafe for set[ug]id processes to be started with file
2730 * descriptors 0..2 closed, as these descriptors are given implicit
2731 * significance in the Standard C library.  fdcheckstd() will create a
2732 * descriptor referencing /dev/null for each of stdin, stdout, and
2733 * stderr that is not already open.
2734 */
2735int
2736fdcheckstd(struct thread *td)
2737{
2738	struct filedesc *fdp;
2739	register_t save;
2740	int i, error, devnull;
2741
2742	fdp = td->td_proc->p_fd;
2743	KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
2744	    ("the fdtable should not be shared"));
2745	MPASS(fdp->fd_nfiles >= 3);
2746	devnull = -1;
2747	for (i = 0; i <= 2; i++) {
2748		if (fdp->fd_ofiles[i].fde_file != NULL)
2749			continue;
2750
2751		save = td->td_retval[0];
2752		if (devnull != -1) {
2753			error = kern_dup(td, FDDUP_FIXED, 0, devnull, i);
2754		} else {
2755			error = kern_openat(td, AT_FDCWD, "/dev/null",
2756			    UIO_SYSSPACE, O_RDWR, 0);
2757			if (error == 0) {
2758				devnull = td->td_retval[0];
2759				KASSERT(devnull == i, ("we didn't get our fd"));
2760			}
2761		}
2762		td->td_retval[0] = save;
2763		if (error != 0)
2764			return (error);
2765	}
2766	return (0);
2767}
2768
2769/*
2770 * Internal form of close.  Decrement reference count on file structure.
2771 * Note: td may be NULL when closing a file that was being passed in a
2772 * message.
2773 */
2774int
2775closef(struct file *fp, struct thread *td)
2776{
2777	struct vnode *vp;
2778	struct flock lf;
2779	struct filedesc_to_leader *fdtol;
2780	struct filedesc *fdp;
2781
2782	MPASS(td != NULL);
2783
2784	/*
2785	 * POSIX record locking dictates that any close releases ALL
2786	 * locks owned by this process.  This is handled by setting
2787	 * a flag in the unlock to free ONLY locks obeying POSIX
2788	 * semantics, and not to free BSD-style file locks.
2789	 * If the descriptor was in a message, POSIX-style locks
2790	 * aren't passed with the descriptor, and the thread pointer
2791	 * will be NULL.  Callers should be careful only to pass a
2792	 * NULL thread pointer when there really is no owning
2793	 * context that might have locks, or the locks will be
2794	 * leaked.
2795	 */
2796	if (fp->f_type == DTYPE_VNODE) {
2797		vp = fp->f_vnode;
2798		if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
2799			lf.l_whence = SEEK_SET;
2800			lf.l_start = 0;
2801			lf.l_len = 0;
2802			lf.l_type = F_UNLCK;
2803			(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
2804			    F_UNLCK, &lf, F_POSIX);
2805		}
2806		fdtol = td->td_proc->p_fdtol;
2807		if (fdtol != NULL) {
2808			/*
2809			 * Handle special case where file descriptor table is
2810			 * shared between multiple process leaders.
2811			 */
2812			fdp = td->td_proc->p_fd;
2813			FILEDESC_XLOCK(fdp);
2814			for (fdtol = fdtol->fdl_next;
2815			    fdtol != td->td_proc->p_fdtol;
2816			    fdtol = fdtol->fdl_next) {
2817				if ((fdtol->fdl_leader->p_flag &
2818				    P_ADVLOCK) == 0)
2819					continue;
2820				fdtol->fdl_holdcount++;
2821				FILEDESC_XUNLOCK(fdp);
2822				lf.l_whence = SEEK_SET;
2823				lf.l_start = 0;
2824				lf.l_len = 0;
2825				lf.l_type = F_UNLCK;
2826				vp = fp->f_vnode;
2827				(void) VOP_ADVLOCK(vp,
2828				    (caddr_t)fdtol->fdl_leader, F_UNLCK, &lf,
2829				    F_POSIX);
2830				FILEDESC_XLOCK(fdp);
2831				fdtol->fdl_holdcount--;
2832				if (fdtol->fdl_holdcount == 0 &&
2833				    fdtol->fdl_wakeup != 0) {
2834					fdtol->fdl_wakeup = 0;
2835					wakeup(fdtol);
2836				}
2837			}
2838			FILEDESC_XUNLOCK(fdp);
2839		}
2840	}
2841	return (fdrop_close(fp, td));
2842}
2843
2844/*
2845 * Hack for file descriptor passing code.
2846 */
2847void
2848closef_nothread(struct file *fp)
2849{
2850
2851	fdrop(fp, NULL);
2852}
2853
2854/*
2855 * Initialize the file pointer with the specified properties.
2856 *
2857 * The ops are set with release semantics to be certain that the flags, type,
2858 * and data are visible when ops is.  This is to prevent ops methods from being
2859 * called with bad data.
2860 */
2861void
2862finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
2863{
2864	fp->f_data = data;
2865	fp->f_flag = flag;
2866	fp->f_type = type;
2867	atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
2868}
2869
2870void
2871finit_vnode(struct file *fp, u_int flag, void *data, struct fileops *ops)
2872{
2873	fp->f_seqcount[UIO_READ] = 1;
2874	fp->f_seqcount[UIO_WRITE] = 1;
2875	finit(fp, (flag & FMASK) | (fp->f_flag & FHASLOCK), DTYPE_VNODE,
2876	    data, ops);
2877}
2878
2879int
2880fget_cap_noref(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
2881    struct file **fpp, struct filecaps *havecapsp)
2882{
2883	struct filedescent *fde;
2884	int error;
2885
2886	FILEDESC_LOCK_ASSERT(fdp);
2887
2888	*fpp = NULL;
2889	fde = fdeget_noref(fdp, fd);
2890	if (fde == NULL) {
2891		error = EBADF;
2892		goto out;
2893	}
2894
2895#ifdef CAPABILITIES
2896	error = cap_check(cap_rights_fde_inline(fde), needrightsp);
2897	if (error != 0)
2898		goto out;
2899#endif
2900
2901	if (havecapsp != NULL)
2902		filecaps_copy(&fde->fde_caps, havecapsp, true);
2903
2904	*fpp = fde->fde_file;
2905
2906	error = 0;
2907out:
2908	return (error);
2909}
2910
2911#ifdef CAPABILITIES
2912int
2913fget_cap(struct thread *td, int fd, cap_rights_t *needrightsp,
2914    struct file **fpp, struct filecaps *havecapsp)
2915{
2916	struct filedesc *fdp = td->td_proc->p_fd;
2917	int error;
2918	struct file *fp;
2919	seqc_t seq;
2920
2921	*fpp = NULL;
2922	for (;;) {
2923		error = fget_unlocked_seq(td, fd, needrightsp, &fp, &seq);
2924		if (error != 0)
2925			return (error);
2926
2927		if (havecapsp != NULL) {
2928			if (!filecaps_copy(&fdp->fd_ofiles[fd].fde_caps,
2929			    havecapsp, false)) {
2930				fdrop(fp, td);
2931				goto get_locked;
2932			}
2933		}
2934
2935		if (!fd_modified(fdp, fd, seq))
2936			break;
2937		fdrop(fp, td);
2938	}
2939
2940	*fpp = fp;
2941	return (0);
2942
2943get_locked:
2944	FILEDESC_SLOCK(fdp);
2945	error = fget_cap_noref(fdp, fd, needrightsp, fpp, havecapsp);
2946	if (error == 0 && !fhold(*fpp))
2947		error = EBADF;
2948	FILEDESC_SUNLOCK(fdp);
2949	return (error);
2950}
2951#else
2952int
2953fget_cap(struct thread *td, int fd, cap_rights_t *needrightsp,
2954    struct file **fpp, struct filecaps *havecapsp)
2955{
2956	int error;
2957	error = fget_unlocked(td, fd, needrightsp, fpp);
2958	if (havecapsp != NULL && error == 0)
2959		filecaps_fill(havecapsp);
2960
2961	return (error);
2962}
2963#endif
2964
2965int
2966fget_remote(struct thread *td, struct proc *p, int fd, struct file **fpp)
2967{
2968	struct filedesc *fdp;
2969	struct file *fp;
2970	int error;
2971
2972	if (p == td->td_proc)	/* curproc */
2973		return (fget_unlocked(td, fd, &cap_no_rights, fpp));
2974
2975	PROC_LOCK(p);
2976	fdp = fdhold(p);
2977	PROC_UNLOCK(p);
2978	if (fdp == NULL)
2979		return (ENOENT);
2980	FILEDESC_SLOCK(fdp);
2981	if (refcount_load(&fdp->fd_refcnt) != 0) {
2982		fp = fget_noref(fdp, fd);
2983		if (fp != NULL && fhold(fp)) {
2984			*fpp = fp;
2985			error = 0;
2986		} else {
2987			error = EBADF;
2988		}
2989	} else {
2990		error = ENOENT;
2991	}
2992	FILEDESC_SUNLOCK(fdp);
2993	fddrop(fdp);
2994	return (error);
2995}
2996
2997#ifdef CAPABILITIES
2998int
2999fgetvp_lookup_smr(struct nameidata *ndp, struct vnode **vpp, bool *fsearch)
3000{
3001	const struct filedescent *fde;
3002	const struct fdescenttbl *fdt;
3003	struct filedesc *fdp;
3004	struct file *fp;
3005	struct vnode *vp;
3006	const cap_rights_t *haverights;
3007	cap_rights_t rights;
3008	seqc_t seq;
3009	int fd;
3010
3011	VFS_SMR_ASSERT_ENTERED();
3012
3013	fd = ndp->ni_dirfd;
3014	rights = *ndp->ni_rightsneeded;
3015	cap_rights_set_one(&rights, CAP_LOOKUP);
3016
3017	fdp = curproc->p_fd;
3018	fdt = fdp->fd_files;
3019	if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
3020		return (EBADF);
3021	seq = seqc_read_notmodify(fd_seqc(fdt, fd));
3022	fde = &fdt->fdt_ofiles[fd];
3023	haverights = cap_rights_fde_inline(fde);
3024	fp = fde->fde_file;
3025	if (__predict_false(fp == NULL))
3026		return (EAGAIN);
3027	if (__predict_false(cap_check_inline_transient(haverights, &rights)))
3028		return (EAGAIN);
3029	*fsearch = ((fp->f_flag & FSEARCH) != 0);
3030	vp = fp->f_vnode;
3031	if (__predict_false(vp == NULL)) {
3032		return (EAGAIN);
3033	}
3034	if (!filecaps_copy(&fde->fde_caps, &ndp->ni_filecaps, false)) {
3035		return (EAGAIN);
3036	}
3037	/*
3038	 * Use an acquire barrier to force re-reading of fdt so it is
3039	 * refreshed for verification.
3040	 */
3041	atomic_thread_fence_acq();
3042	fdt = fdp->fd_files;
3043	if (__predict_false(!seqc_consistent_no_fence(fd_seqc(fdt, fd), seq)))
3044		return (EAGAIN);
3045	/*
3046	 * If file descriptor doesn't have all rights,
3047	 * all lookups relative to it must also be
3048	 * strictly relative.
3049	 *
3050	 * Not yet supported by fast path.
3051	 */
3052	CAP_ALL(&rights);
3053	if (!cap_rights_contains(&ndp->ni_filecaps.fc_rights, &rights) ||
3054	    ndp->ni_filecaps.fc_fcntls != CAP_FCNTL_ALL ||
3055	    ndp->ni_filecaps.fc_nioctls != -1) {
3056#ifdef notyet
3057		ndp->ni_lcf |= NI_LCF_STRICTREL;
3058#else
3059		return (EAGAIN);
3060#endif
3061	}
3062	*vpp = vp;
3063	return (0);
3064}
3065#else
3066int
3067fgetvp_lookup_smr(struct nameidata *ndp, struct vnode **vpp, bool *fsearch)
3068{
3069	const struct fdescenttbl *fdt;
3070	struct filedesc *fdp;
3071	struct file *fp;
3072	struct vnode *vp;
3073	int fd;
3074
3075	VFS_SMR_ASSERT_ENTERED();
3076
3077	fd = ndp->ni_dirfd;
3078	fdp = curproc->p_fd;
3079	fdt = fdp->fd_files;
3080	if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
3081		return (EBADF);
3082	fp = fdt->fdt_ofiles[fd].fde_file;
3083	if (__predict_false(fp == NULL))
3084		return (EAGAIN);
3085	*fsearch = ((fp->f_flag & FSEARCH) != 0);
3086	vp = fp->f_vnode;
3087	if (__predict_false(vp == NULL || vp->v_type != VDIR)) {
3088		return (EAGAIN);
3089	}
3090	/*
3091	 * Use an acquire barrier to force re-reading of fdt so it is
3092	 * refreshed for verification.
3093	 */
3094	atomic_thread_fence_acq();
3095	fdt = fdp->fd_files;
3096	if (__predict_false(fp != fdt->fdt_ofiles[fd].fde_file))
3097		return (EAGAIN);
3098	filecaps_fill(&ndp->ni_filecaps);
3099	*vpp = vp;
3100	return (0);
3101}
3102#endif
3103
3104int
3105fgetvp_lookup(struct nameidata *ndp, struct vnode **vpp)
3106{
3107	struct thread *td;
3108	struct file *fp;
3109	struct vnode *vp;
3110	struct componentname *cnp;
3111	cap_rights_t rights;
3112	int error;
3113
3114	td = curthread;
3115	rights = *ndp->ni_rightsneeded;
3116	cap_rights_set_one(&rights, CAP_LOOKUP);
3117	cnp = &ndp->ni_cnd;
3118
3119	error = fget_cap(td, ndp->ni_dirfd, &rights, &fp, &ndp->ni_filecaps);
3120	if (__predict_false(error != 0))
3121		return (error);
3122	if (__predict_false(fp->f_ops == &badfileops)) {
3123		error = EBADF;
3124		goto out_free;
3125	}
3126	vp = fp->f_vnode;
3127	if (__predict_false(vp == NULL)) {
3128		error = ENOTDIR;
3129		goto out_free;
3130	}
3131	vrefact(vp);
3132	/*
3133	 * XXX does not check for VDIR, handled by namei_setup
3134	 */
3135	if ((fp->f_flag & FSEARCH) != 0)
3136		cnp->cn_flags |= NOEXECCHECK;
3137	fdrop(fp, td);
3138
3139#ifdef CAPABILITIES
3140	/*
3141	 * If file descriptor doesn't have all rights,
3142	 * all lookups relative to it must also be
3143	 * strictly relative.
3144	 */
3145	CAP_ALL(&rights);
3146	if (!cap_rights_contains(&ndp->ni_filecaps.fc_rights, &rights) ||
3147	    ndp->ni_filecaps.fc_fcntls != CAP_FCNTL_ALL ||
3148	    ndp->ni_filecaps.fc_nioctls != -1) {
3149		ndp->ni_lcf |= NI_LCF_STRICTREL;
3150		ndp->ni_resflags |= NIRES_STRICTREL;
3151	}
3152#endif
3153
3154	/*
3155	 * TODO: avoid copying ioctl caps if it can be helped to begin with
3156	 */
3157	if ((cnp->cn_flags & WANTIOCTLCAPS) == 0)
3158		filecaps_free_ioctl(&ndp->ni_filecaps);
3159
3160	*vpp = vp;
3161	return (0);
3162
3163out_free:
3164	filecaps_free(&ndp->ni_filecaps);
3165	fdrop(fp, td);
3166	return (error);
3167}
3168
3169/*
3170 * Fetch the descriptor locklessly.
3171 *
3172 * We avoid fdrop() races by never raising a refcount above 0.  To accomplish
3173 * this we have to use a cmpset loop rather than an atomic_add.  The descriptor
3174 * must be re-verified once we acquire a reference to be certain that the
3175 * identity is still correct and we did not lose a race due to preemption.
3176 *
3177 * Force a reload of fdt when looping. Another thread could reallocate
3178 * the table before this fd was closed, so it is possible that there is
3179 * a stale fp pointer in cached version.
3180 */
3181#ifdef CAPABILITIES
3182static int
3183fget_unlocked_seq(struct thread *td, int fd, cap_rights_t *needrightsp,
3184    struct file **fpp, seqc_t *seqp)
3185{
3186	struct filedesc *fdp;
3187	const struct filedescent *fde;
3188	const struct fdescenttbl *fdt;
3189	struct file *fp;
3190	seqc_t seq;
3191	cap_rights_t haverights;
3192	int error;
3193
3194	fdp = td->td_proc->p_fd;
3195	fdt = fdp->fd_files;
3196	if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
3197		return (EBADF);
3198
3199	for (;;) {
3200		seq = seqc_read_notmodify(fd_seqc(fdt, fd));
3201		fde = &fdt->fdt_ofiles[fd];
3202		haverights = *cap_rights_fde_inline(fde);
3203		fp = fde->fde_file;
3204		if (__predict_false(fp == NULL)) {
3205			if (seqc_consistent(fd_seqc(fdt, fd), seq))
3206				return (EBADF);
3207			fdt = atomic_load_ptr(&fdp->fd_files);
3208			continue;
3209		}
3210		error = cap_check_inline(&haverights, needrightsp);
3211		if (__predict_false(error != 0)) {
3212			if (seqc_consistent(fd_seqc(fdt, fd), seq))
3213				return (error);
3214			fdt = atomic_load_ptr(&fdp->fd_files);
3215			continue;
3216		}
3217		if (__predict_false(!refcount_acquire_if_not_zero(&fp->f_count))) {
3218			fdt = atomic_load_ptr(&fdp->fd_files);
3219			continue;
3220		}
3221		/*
3222		 * Use an acquire barrier to force re-reading of fdt so it is
3223		 * refreshed for verification.
3224		 */
3225		atomic_thread_fence_acq();
3226		fdt = fdp->fd_files;
3227		if (seqc_consistent_no_fence(fd_seqc(fdt, fd), seq))
3228			break;
3229		fdrop(fp, td);
3230	}
3231	*fpp = fp;
3232	if (seqp != NULL) {
3233		*seqp = seq;
3234	}
3235	return (0);
3236}
3237#else
3238static int
3239fget_unlocked_seq(struct thread *td, int fd, cap_rights_t *needrightsp,
3240    struct file **fpp, seqc_t *seqp __unused)
3241{
3242	struct filedesc *fdp;
3243	const struct fdescenttbl *fdt;
3244	struct file *fp;
3245
3246	fdp = td->td_proc->p_fd;
3247	fdt = fdp->fd_files;
3248	if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
3249		return (EBADF);
3250
3251	for (;;) {
3252		fp = fdt->fdt_ofiles[fd].fde_file;
3253		if (__predict_false(fp == NULL))
3254			return (EBADF);
3255		if (__predict_false(!refcount_acquire_if_not_zero(&fp->f_count))) {
3256			fdt = atomic_load_ptr(&fdp->fd_files);
3257			continue;
3258		}
3259		/*
3260		 * Use an acquire barrier to force re-reading of fdt so it is
3261		 * refreshed for verification.
3262		 */
3263		atomic_thread_fence_acq();
3264		fdt = fdp->fd_files;
3265		if (__predict_true(fp == fdt->fdt_ofiles[fd].fde_file))
3266			break;
3267		fdrop(fp, td);
3268	}
3269	*fpp = fp;
3270	return (0);
3271}
3272#endif
3273
3274/*
3275 * See the comments in fget_unlocked_seq for an explanation of how this works.
3276 *
3277 * This is a simplified variant which bails out to the aforementioned routine
3278 * if anything goes wrong. In practice this only happens when userspace is
3279 * racing with itself.
3280 */
3281int
3282fget_unlocked(struct thread *td, int fd, cap_rights_t *needrightsp,
3283    struct file **fpp)
3284{
3285	struct filedesc *fdp;
3286#ifdef CAPABILITIES
3287	const struct filedescent *fde;
3288#endif
3289	const struct fdescenttbl *fdt;
3290	struct file *fp;
3291#ifdef CAPABILITIES
3292	seqc_t seq;
3293	const cap_rights_t *haverights;
3294#endif
3295
3296	fdp = td->td_proc->p_fd;
3297	fdt = fdp->fd_files;
3298	if (__predict_false((u_int)fd >= fdt->fdt_nfiles)) {
3299		*fpp = NULL;
3300		return (EBADF);
3301	}
3302#ifdef CAPABILITIES
3303	seq = seqc_read_notmodify(fd_seqc(fdt, fd));
3304	fde = &fdt->fdt_ofiles[fd];
3305	haverights = cap_rights_fde_inline(fde);
3306	fp = fde->fde_file;
3307#else
3308	fp = fdt->fdt_ofiles[fd].fde_file;
3309#endif
3310	if (__predict_false(fp == NULL))
3311		goto out_fallback;
3312#ifdef CAPABILITIES
3313	if (__predict_false(cap_check_inline_transient(haverights, needrightsp)))
3314		goto out_fallback;
3315#endif
3316	if (__predict_false(!refcount_acquire_if_not_zero(&fp->f_count)))
3317		goto out_fallback;
3318
3319	/*
3320	 * Use an acquire barrier to force re-reading of fdt so it is
3321	 * refreshed for verification.
3322	 */
3323	atomic_thread_fence_acq();
3324	fdt = fdp->fd_files;
3325#ifdef	CAPABILITIES
3326	if (__predict_false(!seqc_consistent_no_fence(fd_seqc(fdt, fd), seq)))
3327#else
3328	if (__predict_false(fp != fdt->fdt_ofiles[fd].fde_file))
3329#endif
3330		goto out_fdrop;
3331	*fpp = fp;
3332	return (0);
3333out_fdrop:
3334	fdrop(fp, td);
3335out_fallback:
3336	*fpp = NULL;
3337	return (fget_unlocked_seq(td, fd, needrightsp, fpp, NULL));
3338}
3339
3340/*
3341 * Translate fd -> file when the caller guarantees the file descriptor table
3342 * can't be changed by others.
3343 *
3344 * Note this does not mean the file object itself is only visible to the caller,
3345 * merely that it wont disappear without having to be referenced.
3346 *
3347 * Must be paired with fput_only_user.
3348 */
3349#ifdef	CAPABILITIES
3350int
3351fget_only_user(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
3352    struct file **fpp)
3353{
3354	const struct filedescent *fde;
3355	const struct fdescenttbl *fdt;
3356	const cap_rights_t *haverights;
3357	struct file *fp;
3358	int error;
3359
3360	MPASS(FILEDESC_IS_ONLY_USER(fdp));
3361
3362	*fpp = NULL;
3363	if (__predict_false(fd >= fdp->fd_nfiles))
3364		return (EBADF);
3365
3366	fdt = fdp->fd_files;
3367	fde = &fdt->fdt_ofiles[fd];
3368	fp = fde->fde_file;
3369	if (__predict_false(fp == NULL))
3370		return (EBADF);
3371	MPASS(refcount_load(&fp->f_count) > 0);
3372	haverights = cap_rights_fde_inline(fde);
3373	error = cap_check_inline(haverights, needrightsp);
3374	if (__predict_false(error != 0))
3375		return (error);
3376	*fpp = fp;
3377	return (0);
3378}
3379#else
3380int
3381fget_only_user(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
3382    struct file **fpp)
3383{
3384	struct file *fp;
3385
3386	MPASS(FILEDESC_IS_ONLY_USER(fdp));
3387
3388	*fpp = NULL;
3389	if (__predict_false(fd >= fdp->fd_nfiles))
3390		return (EBADF);
3391
3392	fp = fdp->fd_ofiles[fd].fde_file;
3393	if (__predict_false(fp == NULL))
3394		return (EBADF);
3395
3396	MPASS(refcount_load(&fp->f_count) > 0);
3397	*fpp = fp;
3398	return (0);
3399}
3400#endif
3401
3402/*
3403 * Extract the file pointer associated with the specified descriptor for the
3404 * current user process.
3405 *
3406 * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
3407 * returned.
3408 *
3409 * File's rights will be checked against the capability rights mask.
3410 *
3411 * If an error occurred the non-zero error is returned and *fpp is set to
3412 * NULL.  Otherwise *fpp is held and set and zero is returned.  Caller is
3413 * responsible for fdrop().
3414 */
3415static __inline int
3416_fget(struct thread *td, int fd, struct file **fpp, int flags,
3417    cap_rights_t *needrightsp)
3418{
3419	struct file *fp;
3420	int error;
3421
3422	*fpp = NULL;
3423	error = fget_unlocked(td, fd, needrightsp, &fp);
3424	if (__predict_false(error != 0))
3425		return (error);
3426	if (__predict_false(fp->f_ops == &badfileops)) {
3427		fdrop(fp, td);
3428		return (EBADF);
3429	}
3430
3431	/*
3432	 * FREAD and FWRITE failure return EBADF as per POSIX.
3433	 */
3434	error = 0;
3435	switch (flags) {
3436	case FREAD:
3437	case FWRITE:
3438		if ((fp->f_flag & flags) == 0)
3439			error = EBADF;
3440		break;
3441	case FEXEC:
3442		if (fp->f_ops != &path_fileops &&
3443		    ((fp->f_flag & (FREAD | FEXEC)) == 0 ||
3444		    (fp->f_flag & FWRITE) != 0))
3445			error = EBADF;
3446		break;
3447	case 0:
3448		break;
3449	default:
3450		KASSERT(0, ("wrong flags"));
3451	}
3452
3453	if (error != 0) {
3454		fdrop(fp, td);
3455		return (error);
3456	}
3457
3458	*fpp = fp;
3459	return (0);
3460}
3461
3462int
3463fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
3464{
3465
3466	return (_fget(td, fd, fpp, 0, rightsp));
3467}
3468
3469int
3470fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp, vm_prot_t *maxprotp,
3471    struct file **fpp)
3472{
3473	int error;
3474#ifndef CAPABILITIES
3475	error = _fget(td, fd, fpp, 0, rightsp);
3476	if (maxprotp != NULL)
3477		*maxprotp = VM_PROT_ALL;
3478	return (error);
3479#else
3480	cap_rights_t fdrights;
3481	struct filedesc *fdp;
3482	struct file *fp;
3483	seqc_t seq;
3484
3485	*fpp = NULL;
3486	fdp = td->td_proc->p_fd;
3487	MPASS(cap_rights_is_set(rightsp, CAP_MMAP));
3488	for (;;) {
3489		error = fget_unlocked_seq(td, fd, rightsp, &fp, &seq);
3490		if (__predict_false(error != 0))
3491			return (error);
3492		if (__predict_false(fp->f_ops == &badfileops)) {
3493			fdrop(fp, td);
3494			return (EBADF);
3495		}
3496		if (maxprotp != NULL)
3497			fdrights = *cap_rights(fdp, fd);
3498		if (!fd_modified(fdp, fd, seq))
3499			break;
3500		fdrop(fp, td);
3501	}
3502
3503	/*
3504	 * If requested, convert capability rights to access flags.
3505	 */
3506	if (maxprotp != NULL)
3507		*maxprotp = cap_rights_to_vmprot(&fdrights);
3508	*fpp = fp;
3509	return (0);
3510#endif
3511}
3512
3513int
3514fget_read(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
3515{
3516
3517	return (_fget(td, fd, fpp, FREAD, rightsp));
3518}
3519
3520int
3521fget_write(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
3522{
3523
3524	return (_fget(td, fd, fpp, FWRITE, rightsp));
3525}
3526
3527int
3528fget_fcntl(struct thread *td, int fd, cap_rights_t *rightsp, int needfcntl,
3529    struct file **fpp)
3530{
3531#ifndef CAPABILITIES
3532	return (fget_unlocked(td, fd, rightsp, fpp));
3533#else
3534	struct filedesc *fdp = td->td_proc->p_fd;
3535	struct file *fp;
3536	int error;
3537	seqc_t seq;
3538
3539	*fpp = NULL;
3540	MPASS(cap_rights_is_set(rightsp, CAP_FCNTL));
3541	for (;;) {
3542		error = fget_unlocked_seq(td, fd, rightsp, &fp, &seq);
3543		if (error != 0)
3544			return (error);
3545		error = cap_fcntl_check(fdp, fd, needfcntl);
3546		if (!fd_modified(fdp, fd, seq))
3547			break;
3548		fdrop(fp, td);
3549	}
3550	if (error != 0) {
3551		fdrop(fp, td);
3552		return (error);
3553	}
3554	*fpp = fp;
3555	return (0);
3556#endif
3557}
3558
3559/*
3560 * Like fget() but loads the underlying vnode, or returns an error if the
3561 * descriptor does not represent a vnode.  Note that pipes use vnodes but
3562 * never have VM objects.  The returned vnode will be vref()'d.
3563 *
3564 * XXX: what about the unused flags ?
3565 */
3566static __inline int
3567_fgetvp(struct thread *td, int fd, int flags, cap_rights_t *needrightsp,
3568    struct vnode **vpp)
3569{
3570	struct file *fp;
3571	int error;
3572
3573	*vpp = NULL;
3574	error = _fget(td, fd, &fp, flags, needrightsp);
3575	if (error != 0)
3576		return (error);
3577	if (fp->f_vnode == NULL) {
3578		error = EINVAL;
3579	} else {
3580		*vpp = fp->f_vnode;
3581		vrefact(*vpp);
3582	}
3583	fdrop(fp, td);
3584
3585	return (error);
3586}
3587
3588int
3589fgetvp(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
3590{
3591
3592	return (_fgetvp(td, fd, 0, rightsp, vpp));
3593}
3594
3595int
3596fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
3597    struct filecaps *havecaps, struct vnode **vpp)
3598{
3599	struct filecaps caps;
3600	struct file *fp;
3601	int error;
3602
3603	error = fget_cap(td, fd, needrightsp, &fp, &caps);
3604	if (error != 0)
3605		return (error);
3606	if (fp->f_ops == &badfileops) {
3607		error = EBADF;
3608		goto out;
3609	}
3610	if (fp->f_vnode == NULL) {
3611		error = EINVAL;
3612		goto out;
3613	}
3614
3615	*havecaps = caps;
3616	*vpp = fp->f_vnode;
3617	vrefact(*vpp);
3618	fdrop(fp, td);
3619
3620	return (0);
3621out:
3622	filecaps_free(&caps);
3623	fdrop(fp, td);
3624	return (error);
3625}
3626
3627int
3628fgetvp_read(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
3629{
3630
3631	return (_fgetvp(td, fd, FREAD, rightsp, vpp));
3632}
3633
3634int
3635fgetvp_exec(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
3636{
3637
3638	return (_fgetvp(td, fd, FEXEC, rightsp, vpp));
3639}
3640
3641#ifdef notyet
3642int
3643fgetvp_write(struct thread *td, int fd, cap_rights_t *rightsp,
3644    struct vnode **vpp)
3645{
3646
3647	return (_fgetvp(td, fd, FWRITE, rightsp, vpp));
3648}
3649#endif
3650
3651/*
3652 * Handle the last reference to a file being closed.
3653 *
3654 * Without the noinline attribute clang keeps inlining the func thorough this
3655 * file when fdrop is used.
3656 */
3657int __noinline
3658_fdrop(struct file *fp, struct thread *td)
3659{
3660	int error;
3661#ifdef INVARIANTS
3662	int count;
3663
3664	count = refcount_load(&fp->f_count);
3665	if (count != 0)
3666		panic("fdrop: fp %p count %d", fp, count);
3667#endif
3668	error = fo_close(fp, td);
3669	atomic_subtract_int(&openfiles, 1);
3670	crfree(fp->f_cred);
3671	free(fp->f_advice, M_FADVISE);
3672	uma_zfree(file_zone, fp);
3673
3674	return (error);
3675}
3676
3677/*
3678 * Apply an advisory lock on a file descriptor.
3679 *
3680 * Just attempt to get a record lock of the requested type on the entire file
3681 * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
3682 */
3683#ifndef _SYS_SYSPROTO_H_
3684struct flock_args {
3685	int	fd;
3686	int	how;
3687};
3688#endif
3689/* ARGSUSED */
3690int
3691sys_flock(struct thread *td, struct flock_args *uap)
3692{
3693	struct file *fp;
3694	struct vnode *vp;
3695	struct flock lf;
3696	int error;
3697
3698	error = fget(td, uap->fd, &cap_flock_rights, &fp);
3699	if (error != 0)
3700		return (error);
3701	error = EOPNOTSUPP;
3702	if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
3703		goto done;
3704	}
3705	if (fp->f_ops == &path_fileops) {
3706		goto done;
3707	}
3708
3709	error = 0;
3710	vp = fp->f_vnode;
3711	lf.l_whence = SEEK_SET;
3712	lf.l_start = 0;
3713	lf.l_len = 0;
3714	if (uap->how & LOCK_UN) {
3715		lf.l_type = F_UNLCK;
3716		atomic_clear_int(&fp->f_flag, FHASLOCK);
3717		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
3718		goto done;
3719	}
3720	if (uap->how & LOCK_EX)
3721		lf.l_type = F_WRLCK;
3722	else if (uap->how & LOCK_SH)
3723		lf.l_type = F_RDLCK;
3724	else {
3725		error = EBADF;
3726		goto done;
3727	}
3728	atomic_set_int(&fp->f_flag, FHASLOCK);
3729	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
3730	    (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
3731done:
3732	fdrop(fp, td);
3733	return (error);
3734}
3735/*
3736 * Duplicate the specified descriptor to a free descriptor.
3737 */
3738int
3739dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode,
3740    int openerror, int *indxp)
3741{
3742	struct filedescent *newfde, *oldfde;
3743	struct file *fp;
3744	u_long *ioctls;
3745	int error, indx;
3746
3747	KASSERT(openerror == ENODEV || openerror == ENXIO,
3748	    ("unexpected error %d in %s", openerror, __func__));
3749
3750	/*
3751	 * If the to-be-dup'd fd number is greater than the allowed number
3752	 * of file descriptors, or the fd to be dup'd has already been
3753	 * closed, then reject.
3754	 */
3755	FILEDESC_XLOCK(fdp);
3756	if ((fp = fget_noref(fdp, dfd)) == NULL) {
3757		FILEDESC_XUNLOCK(fdp);
3758		return (EBADF);
3759	}
3760
3761	error = fdalloc(td, 0, &indx);
3762	if (error != 0) {
3763		FILEDESC_XUNLOCK(fdp);
3764		return (error);
3765	}
3766
3767	/*
3768	 * There are two cases of interest here.
3769	 *
3770	 * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
3771	 *
3772	 * For ENXIO steal away the file structure from (dfd) and store it in
3773	 * (indx).  (dfd) is effectively closed by this operation.
3774	 */
3775	switch (openerror) {
3776	case ENODEV:
3777		/*
3778		 * Check that the mode the file is being opened for is a
3779		 * subset of the mode of the existing descriptor.
3780		 */
3781		if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
3782			fdunused(fdp, indx);
3783			FILEDESC_XUNLOCK(fdp);
3784			return (EACCES);
3785		}
3786		if (!fhold(fp)) {
3787			fdunused(fdp, indx);
3788			FILEDESC_XUNLOCK(fdp);
3789			return (EBADF);
3790		}
3791		newfde = &fdp->fd_ofiles[indx];
3792		oldfde = &fdp->fd_ofiles[dfd];
3793		ioctls = filecaps_copy_prep(&oldfde->fde_caps);
3794#ifdef CAPABILITIES
3795		seqc_write_begin(&newfde->fde_seqc);
3796#endif
3797		fde_copy(oldfde, newfde);
3798		filecaps_copy_finish(&oldfde->fde_caps, &newfde->fde_caps,
3799		    ioctls);
3800#ifdef CAPABILITIES
3801		seqc_write_end(&newfde->fde_seqc);
3802#endif
3803		break;
3804	case ENXIO:
3805		/*
3806		 * Steal away the file pointer from dfd and stuff it into indx.
3807		 */
3808		newfde = &fdp->fd_ofiles[indx];
3809		oldfde = &fdp->fd_ofiles[dfd];
3810#ifdef CAPABILITIES
3811		seqc_write_begin(&oldfde->fde_seqc);
3812		seqc_write_begin(&newfde->fde_seqc);
3813#endif
3814		fde_copy(oldfde, newfde);
3815		oldfde->fde_file = NULL;
3816		fdunused(fdp, dfd);
3817#ifdef CAPABILITIES
3818		seqc_write_end(&newfde->fde_seqc);
3819		seqc_write_end(&oldfde->fde_seqc);
3820#endif
3821		break;
3822	}
3823	FILEDESC_XUNLOCK(fdp);
3824	*indxp = indx;
3825	return (0);
3826}
3827
3828/*
3829 * This sysctl determines if we will allow a process to chroot(2) if it
3830 * has a directory open:
3831 *	0: disallowed for all processes.
3832 *	1: allowed for processes that were not already chroot(2)'ed.
3833 *	2: allowed for all processes.
3834 */
3835
3836static int chroot_allow_open_directories = 1;
3837
3838SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
3839    &chroot_allow_open_directories, 0,
3840    "Allow a process to chroot(2) if it has a directory open");
3841
3842/*
3843 * Helper function for raised chroot(2) security function:  Refuse if
3844 * any filedescriptors are open directories.
3845 */
3846static int
3847chroot_refuse_vdir_fds(struct filedesc *fdp)
3848{
3849	struct vnode *vp;
3850	struct file *fp;
3851	int i;
3852
3853	FILEDESC_LOCK_ASSERT(fdp);
3854
3855	FILEDESC_FOREACH_FP(fdp, i, fp) {
3856		if (fp->f_type == DTYPE_VNODE) {
3857			vp = fp->f_vnode;
3858			if (vp->v_type == VDIR)
3859				return (EPERM);
3860		}
3861	}
3862	return (0);
3863}
3864
3865static void
3866pwd_fill(struct pwd *oldpwd, struct pwd *newpwd)
3867{
3868
3869	if (newpwd->pwd_cdir == NULL && oldpwd->pwd_cdir != NULL) {
3870		vrefact(oldpwd->pwd_cdir);
3871		newpwd->pwd_cdir = oldpwd->pwd_cdir;
3872	}
3873
3874	if (newpwd->pwd_rdir == NULL && oldpwd->pwd_rdir != NULL) {
3875		vrefact(oldpwd->pwd_rdir);
3876		newpwd->pwd_rdir = oldpwd->pwd_rdir;
3877	}
3878
3879	if (newpwd->pwd_jdir == NULL && oldpwd->pwd_jdir != NULL) {
3880		vrefact(oldpwd->pwd_jdir);
3881		newpwd->pwd_jdir = oldpwd->pwd_jdir;
3882	}
3883
3884	if (newpwd->pwd_adir == NULL && oldpwd->pwd_adir != NULL) {
3885		vrefact(oldpwd->pwd_adir);
3886		newpwd->pwd_adir = oldpwd->pwd_adir;
3887	}
3888}
3889
3890struct pwd *
3891pwd_hold_pwddesc(struct pwddesc *pdp)
3892{
3893	struct pwd *pwd;
3894
3895	PWDDESC_ASSERT_XLOCKED(pdp);
3896	pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
3897	if (pwd != NULL)
3898		refcount_acquire(&pwd->pwd_refcount);
3899	return (pwd);
3900}
3901
3902bool
3903pwd_hold_smr(struct pwd *pwd)
3904{
3905
3906	MPASS(pwd != NULL);
3907	if (__predict_true(refcount_acquire_if_not_zero(&pwd->pwd_refcount))) {
3908		return (true);
3909	}
3910	return (false);
3911}
3912
3913struct pwd *
3914pwd_hold(struct thread *td)
3915{
3916	struct pwddesc *pdp;
3917	struct pwd *pwd;
3918
3919	pdp = td->td_proc->p_pd;
3920
3921	vfs_smr_enter();
3922	pwd = vfs_smr_entered_load(&pdp->pd_pwd);
3923	if (pwd_hold_smr(pwd)) {
3924		vfs_smr_exit();
3925		return (pwd);
3926	}
3927	vfs_smr_exit();
3928	PWDDESC_XLOCK(pdp);
3929	pwd = pwd_hold_pwddesc(pdp);
3930	MPASS(pwd != NULL);
3931	PWDDESC_XUNLOCK(pdp);
3932	return (pwd);
3933}
3934
3935struct pwd *
3936pwd_hold_proc(struct proc *p)
3937{
3938	struct pwddesc *pdp;
3939	struct pwd *pwd;
3940
3941	PROC_ASSERT_HELD(p);
3942	PROC_LOCK(p);
3943	pdp = pdhold(p);
3944	MPASS(pdp != NULL);
3945	PROC_UNLOCK(p);
3946
3947	PWDDESC_XLOCK(pdp);
3948	pwd = pwd_hold_pwddesc(pdp);
3949	MPASS(pwd != NULL);
3950	PWDDESC_XUNLOCK(pdp);
3951	pddrop(pdp);
3952	return (pwd);
3953}
3954
3955static struct pwd *
3956pwd_alloc(void)
3957{
3958	struct pwd *pwd;
3959
3960	pwd = uma_zalloc_smr(pwd_zone, M_WAITOK);
3961	bzero(pwd, sizeof(*pwd));
3962	refcount_init(&pwd->pwd_refcount, 1);
3963	return (pwd);
3964}
3965
3966void
3967pwd_drop(struct pwd *pwd)
3968{
3969
3970	if (!refcount_release(&pwd->pwd_refcount))
3971		return;
3972
3973	if (pwd->pwd_cdir != NULL)
3974		vrele(pwd->pwd_cdir);
3975	if (pwd->pwd_rdir != NULL)
3976		vrele(pwd->pwd_rdir);
3977	if (pwd->pwd_jdir != NULL)
3978		vrele(pwd->pwd_jdir);
3979	if (pwd->pwd_adir != NULL)
3980		vrele(pwd->pwd_adir);
3981	uma_zfree_smr(pwd_zone, pwd);
3982}
3983
3984/*
3985* The caller is responsible for invoking priv_check() and
3986* mac_vnode_check_chroot() to authorize this operation.
3987*/
3988int
3989pwd_chroot(struct thread *td, struct vnode *vp)
3990{
3991	struct pwddesc *pdp;
3992	struct filedesc *fdp;
3993	struct pwd *newpwd, *oldpwd;
3994	int error;
3995
3996	fdp = td->td_proc->p_fd;
3997	pdp = td->td_proc->p_pd;
3998	newpwd = pwd_alloc();
3999	FILEDESC_SLOCK(fdp);
4000	PWDDESC_XLOCK(pdp);
4001	oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4002	if (chroot_allow_open_directories == 0 ||
4003	    (chroot_allow_open_directories == 1 &&
4004	    oldpwd->pwd_rdir != rootvnode)) {
4005		error = chroot_refuse_vdir_fds(fdp);
4006		FILEDESC_SUNLOCK(fdp);
4007		if (error != 0) {
4008			PWDDESC_XUNLOCK(pdp);
4009			pwd_drop(newpwd);
4010			return (error);
4011		}
4012	} else {
4013		FILEDESC_SUNLOCK(fdp);
4014	}
4015
4016	vrefact(vp);
4017	newpwd->pwd_rdir = vp;
4018	vrefact(vp);
4019	newpwd->pwd_adir = vp;
4020	if (oldpwd->pwd_jdir == NULL) {
4021		vrefact(vp);
4022		newpwd->pwd_jdir = vp;
4023	}
4024	pwd_fill(oldpwd, newpwd);
4025	pwd_set(pdp, newpwd);
4026	PWDDESC_XUNLOCK(pdp);
4027	pwd_drop(oldpwd);
4028	return (0);
4029}
4030
4031void
4032pwd_chdir(struct thread *td, struct vnode *vp)
4033{
4034	struct pwddesc *pdp;
4035	struct pwd *newpwd, *oldpwd;
4036
4037	VNPASS(vp->v_usecount > 0, vp);
4038
4039	newpwd = pwd_alloc();
4040	pdp = td->td_proc->p_pd;
4041	PWDDESC_XLOCK(pdp);
4042	oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4043	newpwd->pwd_cdir = vp;
4044	pwd_fill(oldpwd, newpwd);
4045	pwd_set(pdp, newpwd);
4046	PWDDESC_XUNLOCK(pdp);
4047	pwd_drop(oldpwd);
4048}
4049
4050/*
4051 * Process is transitioning to/from a non-native ABI.
4052 */
4053void
4054pwd_altroot(struct thread *td, struct vnode *altroot_vp)
4055{
4056	struct pwddesc *pdp;
4057	struct pwd *newpwd, *oldpwd;
4058
4059	newpwd = pwd_alloc();
4060	pdp = td->td_proc->p_pd;
4061	PWDDESC_XLOCK(pdp);
4062	oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4063	if (altroot_vp != NULL) {
4064		/*
4065		 * Native process to a non-native ABI.
4066		 */
4067
4068		vrefact(altroot_vp);
4069		newpwd->pwd_adir = altroot_vp;
4070	} else {
4071		/*
4072		 * Non-native process to the native ABI.
4073		 */
4074
4075		vrefact(oldpwd->pwd_rdir);
4076		newpwd->pwd_adir = oldpwd->pwd_rdir;
4077	}
4078	pwd_fill(oldpwd, newpwd);
4079	pwd_set(pdp, newpwd);
4080	PWDDESC_XUNLOCK(pdp);
4081	pwd_drop(oldpwd);
4082}
4083
4084/*
4085 * jail_attach(2) changes both root and working directories.
4086 */
4087int
4088pwd_chroot_chdir(struct thread *td, struct vnode *vp)
4089{
4090	struct pwddesc *pdp;
4091	struct filedesc *fdp;
4092	struct pwd *newpwd, *oldpwd;
4093	int error;
4094
4095	fdp = td->td_proc->p_fd;
4096	pdp = td->td_proc->p_pd;
4097	newpwd = pwd_alloc();
4098	FILEDESC_SLOCK(fdp);
4099	PWDDESC_XLOCK(pdp);
4100	oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4101	error = chroot_refuse_vdir_fds(fdp);
4102	FILEDESC_SUNLOCK(fdp);
4103	if (error != 0) {
4104		PWDDESC_XUNLOCK(pdp);
4105		pwd_drop(newpwd);
4106		return (error);
4107	}
4108
4109	vrefact(vp);
4110	newpwd->pwd_rdir = vp;
4111	vrefact(vp);
4112	newpwd->pwd_cdir = vp;
4113	if (oldpwd->pwd_jdir == NULL) {
4114		vrefact(vp);
4115		newpwd->pwd_jdir = vp;
4116	}
4117	vrefact(vp);
4118	newpwd->pwd_adir = vp;
4119	pwd_fill(oldpwd, newpwd);
4120	pwd_set(pdp, newpwd);
4121	PWDDESC_XUNLOCK(pdp);
4122	pwd_drop(oldpwd);
4123	return (0);
4124}
4125
4126void
4127pwd_ensure_dirs(void)
4128{
4129	struct pwddesc *pdp;
4130	struct pwd *oldpwd, *newpwd;
4131
4132	pdp = curproc->p_pd;
4133	PWDDESC_XLOCK(pdp);
4134	oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4135	if (oldpwd->pwd_cdir != NULL && oldpwd->pwd_rdir != NULL &&
4136	    oldpwd->pwd_adir != NULL) {
4137		PWDDESC_XUNLOCK(pdp);
4138		return;
4139	}
4140	PWDDESC_XUNLOCK(pdp);
4141
4142	newpwd = pwd_alloc();
4143	PWDDESC_XLOCK(pdp);
4144	oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4145	pwd_fill(oldpwd, newpwd);
4146	if (newpwd->pwd_cdir == NULL) {
4147		vrefact(rootvnode);
4148		newpwd->pwd_cdir = rootvnode;
4149	}
4150	if (newpwd->pwd_rdir == NULL) {
4151		vrefact(rootvnode);
4152		newpwd->pwd_rdir = rootvnode;
4153	}
4154	if (newpwd->pwd_adir == NULL) {
4155		vrefact(rootvnode);
4156		newpwd->pwd_adir = rootvnode;
4157	}
4158	pwd_set(pdp, newpwd);
4159	PWDDESC_XUNLOCK(pdp);
4160	pwd_drop(oldpwd);
4161}
4162
4163void
4164pwd_set_rootvnode(void)
4165{
4166	struct pwddesc *pdp;
4167	struct pwd *oldpwd, *newpwd;
4168
4169	pdp = curproc->p_pd;
4170
4171	newpwd = pwd_alloc();
4172	PWDDESC_XLOCK(pdp);
4173	oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4174	vrefact(rootvnode);
4175	newpwd->pwd_cdir = rootvnode;
4176	vrefact(rootvnode);
4177	newpwd->pwd_rdir = rootvnode;
4178	vrefact(rootvnode);
4179	newpwd->pwd_adir = rootvnode;
4180	pwd_fill(oldpwd, newpwd);
4181	pwd_set(pdp, newpwd);
4182	PWDDESC_XUNLOCK(pdp);
4183	pwd_drop(oldpwd);
4184}
4185
4186/*
4187 * Scan all active processes and prisons to see if any of them have a current
4188 * or root directory of `olddp'. If so, replace them with the new mount point.
4189 */
4190void
4191mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
4192{
4193	struct pwddesc *pdp;
4194	struct pwd *newpwd, *oldpwd;
4195	struct prison *pr;
4196	struct proc *p;
4197	int nrele;
4198
4199	if (vrefcnt(olddp) == 1)
4200		return;
4201	nrele = 0;
4202	newpwd = pwd_alloc();
4203	sx_slock(&allproc_lock);
4204	FOREACH_PROC_IN_SYSTEM(p) {
4205		PROC_LOCK(p);
4206		pdp = pdhold(p);
4207		PROC_UNLOCK(p);
4208		if (pdp == NULL)
4209			continue;
4210		PWDDESC_XLOCK(pdp);
4211		oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4212		if (oldpwd == NULL ||
4213		    (oldpwd->pwd_cdir != olddp &&
4214		    oldpwd->pwd_rdir != olddp &&
4215		    oldpwd->pwd_jdir != olddp &&
4216		    oldpwd->pwd_adir != olddp)) {
4217			PWDDESC_XUNLOCK(pdp);
4218			pddrop(pdp);
4219			continue;
4220		}
4221		if (oldpwd->pwd_cdir == olddp) {
4222			vrefact(newdp);
4223			newpwd->pwd_cdir = newdp;
4224		}
4225		if (oldpwd->pwd_rdir == olddp) {
4226			vrefact(newdp);
4227			newpwd->pwd_rdir = newdp;
4228		}
4229		if (oldpwd->pwd_jdir == olddp) {
4230			vrefact(newdp);
4231			newpwd->pwd_jdir = newdp;
4232		}
4233		if (oldpwd->pwd_adir == olddp) {
4234			vrefact(newdp);
4235			newpwd->pwd_adir = newdp;
4236		}
4237		pwd_fill(oldpwd, newpwd);
4238		pwd_set(pdp, newpwd);
4239		PWDDESC_XUNLOCK(pdp);
4240		pwd_drop(oldpwd);
4241		pddrop(pdp);
4242		newpwd = pwd_alloc();
4243	}
4244	sx_sunlock(&allproc_lock);
4245	pwd_drop(newpwd);
4246	if (rootvnode == olddp) {
4247		vrefact(newdp);
4248		rootvnode = newdp;
4249		nrele++;
4250	}
4251	mtx_lock(&prison0.pr_mtx);
4252	if (prison0.pr_root == olddp) {
4253		vrefact(newdp);
4254		prison0.pr_root = newdp;
4255		nrele++;
4256	}
4257	mtx_unlock(&prison0.pr_mtx);
4258	sx_slock(&allprison_lock);
4259	TAILQ_FOREACH(pr, &allprison, pr_list) {
4260		mtx_lock(&pr->pr_mtx);
4261		if (pr->pr_root == olddp) {
4262			vrefact(newdp);
4263			pr->pr_root = newdp;
4264			nrele++;
4265		}
4266		mtx_unlock(&pr->pr_mtx);
4267	}
4268	sx_sunlock(&allprison_lock);
4269	while (nrele--)
4270		vrele(olddp);
4271}
4272
4273int
4274descrip_check_write_mp(struct filedesc *fdp, struct mount *mp)
4275{
4276	struct file *fp;
4277	struct vnode *vp;
4278	int error, i;
4279
4280	error = 0;
4281	FILEDESC_SLOCK(fdp);
4282	FILEDESC_FOREACH_FP(fdp, i, fp) {
4283		if (fp->f_type != DTYPE_VNODE ||
4284		    (atomic_load_int(&fp->f_flag) & FWRITE) == 0)
4285			continue;
4286		vp = fp->f_vnode;
4287		if (vp->v_mount == mp) {
4288			error = EDEADLK;
4289			break;
4290		}
4291	}
4292	FILEDESC_SUNLOCK(fdp);
4293	return (error);
4294}
4295
4296struct filedesc_to_leader *
4297filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp,
4298    struct proc *leader)
4299{
4300	struct filedesc_to_leader *fdtol;
4301
4302	fdtol = malloc(sizeof(struct filedesc_to_leader),
4303	    M_FILEDESC_TO_LEADER, M_WAITOK);
4304	fdtol->fdl_refcount = 1;
4305	fdtol->fdl_holdcount = 0;
4306	fdtol->fdl_wakeup = 0;
4307	fdtol->fdl_leader = leader;
4308	if (old != NULL) {
4309		FILEDESC_XLOCK(fdp);
4310		fdtol->fdl_next = old->fdl_next;
4311		fdtol->fdl_prev = old;
4312		old->fdl_next = fdtol;
4313		fdtol->fdl_next->fdl_prev = fdtol;
4314		FILEDESC_XUNLOCK(fdp);
4315	} else {
4316		fdtol->fdl_next = fdtol;
4317		fdtol->fdl_prev = fdtol;
4318	}
4319	return (fdtol);
4320}
4321
4322struct filedesc_to_leader *
4323filedesc_to_leader_share(struct filedesc_to_leader *fdtol, struct filedesc *fdp)
4324{
4325	FILEDESC_XLOCK(fdp);
4326	fdtol->fdl_refcount++;
4327	FILEDESC_XUNLOCK(fdp);
4328	return (fdtol);
4329}
4330
4331static int
4332sysctl_kern_proc_nfds(SYSCTL_HANDLER_ARGS)
4333{
4334	NDSLOTTYPE *map;
4335	struct filedesc *fdp;
4336	u_int namelen;
4337	int count, off, minoff;
4338
4339	namelen = arg2;
4340	if (namelen != 1)
4341		return (EINVAL);
4342
4343	if (*(int *)arg1 != 0)
4344		return (EINVAL);
4345
4346	fdp = curproc->p_fd;
4347	count = 0;
4348	FILEDESC_SLOCK(fdp);
4349	map = fdp->fd_map;
4350	off = NDSLOT(fdp->fd_nfiles - 1);
4351	for (minoff = NDSLOT(0); off >= minoff; --off)
4352		count += bitcountl(map[off]);
4353	FILEDESC_SUNLOCK(fdp);
4354
4355	return (SYSCTL_OUT(req, &count, sizeof(count)));
4356}
4357
4358static SYSCTL_NODE(_kern_proc, KERN_PROC_NFDS, nfds,
4359    CTLFLAG_RD|CTLFLAG_CAPRD|CTLFLAG_MPSAFE, sysctl_kern_proc_nfds,
4360    "Number of open file descriptors");
4361
4362/*
4363 * Get file structures globally.
4364 */
4365static int
4366sysctl_kern_file(SYSCTL_HANDLER_ARGS)
4367{
4368	struct xfile xf;
4369	struct filedesc *fdp;
4370	struct file *fp;
4371	struct proc *p;
4372	int error, n;
4373
4374	error = sysctl_wire_old_buffer(req, 0);
4375	if (error != 0)
4376		return (error);
4377	if (req->oldptr == NULL) {
4378		n = 0;
4379		sx_slock(&allproc_lock);
4380		FOREACH_PROC_IN_SYSTEM(p) {
4381			PROC_LOCK(p);
4382			if (p->p_state == PRS_NEW) {
4383				PROC_UNLOCK(p);
4384				continue;
4385			}
4386			fdp = fdhold(p);
4387			PROC_UNLOCK(p);
4388			if (fdp == NULL)
4389				continue;
4390			/* overestimates sparse tables. */
4391			n += fdp->fd_nfiles;
4392			fddrop(fdp);
4393		}
4394		sx_sunlock(&allproc_lock);
4395		return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
4396	}
4397	error = 0;
4398	bzero(&xf, sizeof(xf));
4399	xf.xf_size = sizeof(xf);
4400	sx_slock(&allproc_lock);
4401	FOREACH_PROC_IN_SYSTEM(p) {
4402		PROC_LOCK(p);
4403		if (p->p_state == PRS_NEW) {
4404			PROC_UNLOCK(p);
4405			continue;
4406		}
4407		if (p_cansee(req->td, p) != 0) {
4408			PROC_UNLOCK(p);
4409			continue;
4410		}
4411		xf.xf_pid = p->p_pid;
4412		xf.xf_uid = p->p_ucred->cr_uid;
4413		fdp = fdhold(p);
4414		PROC_UNLOCK(p);
4415		if (fdp == NULL)
4416			continue;
4417		FILEDESC_SLOCK(fdp);
4418		if (refcount_load(&fdp->fd_refcnt) == 0)
4419			goto nextproc;
4420		FILEDESC_FOREACH_FP(fdp, n, fp) {
4421			xf.xf_fd = n;
4422			xf.xf_file = (uintptr_t)fp;
4423			xf.xf_data = (uintptr_t)fp->f_data;
4424			xf.xf_vnode = (uintptr_t)fp->f_vnode;
4425			xf.xf_type = (uintptr_t)fp->f_type;
4426			xf.xf_count = refcount_load(&fp->f_count);
4427			xf.xf_msgcount = 0;
4428			xf.xf_offset = foffset_get(fp);
4429			xf.xf_flag = fp->f_flag;
4430			error = SYSCTL_OUT(req, &xf, sizeof(xf));
4431
4432			/*
4433			 * There is no need to re-check the fdtable refcount
4434			 * here since the filedesc lock is not dropped in the
4435			 * loop body.
4436			 */
4437			if (error != 0)
4438				break;
4439		}
4440nextproc:
4441		FILEDESC_SUNLOCK(fdp);
4442		fddrop(fdp);
4443		if (error)
4444			break;
4445	}
4446	sx_sunlock(&allproc_lock);
4447	return (error);
4448}
4449
4450SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
4451    0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
4452
4453#ifdef KINFO_FILE_SIZE
4454CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
4455#endif
4456
4457static int
4458xlate_fflags(int fflags)
4459{
4460	static const struct {
4461		int	fflag;
4462		int	kf_fflag;
4463	} fflags_table[] = {
4464		{ FAPPEND, KF_FLAG_APPEND },
4465		{ FASYNC, KF_FLAG_ASYNC },
4466		{ FFSYNC, KF_FLAG_FSYNC },
4467		{ FHASLOCK, KF_FLAG_HASLOCK },
4468		{ FNONBLOCK, KF_FLAG_NONBLOCK },
4469		{ FREAD, KF_FLAG_READ },
4470		{ FWRITE, KF_FLAG_WRITE },
4471		{ O_CREAT, KF_FLAG_CREAT },
4472		{ O_DIRECT, KF_FLAG_DIRECT },
4473		{ O_EXCL, KF_FLAG_EXCL },
4474		{ O_EXEC, KF_FLAG_EXEC },
4475		{ O_EXLOCK, KF_FLAG_EXLOCK },
4476		{ O_NOFOLLOW, KF_FLAG_NOFOLLOW },
4477		{ O_SHLOCK, KF_FLAG_SHLOCK },
4478		{ O_TRUNC, KF_FLAG_TRUNC }
4479	};
4480	unsigned int i;
4481	int kflags;
4482
4483	kflags = 0;
4484	for (i = 0; i < nitems(fflags_table); i++)
4485		if (fflags & fflags_table[i].fflag)
4486			kflags |=  fflags_table[i].kf_fflag;
4487	return (kflags);
4488}
4489
4490/* Trim unused data from kf_path by truncating the structure size. */
4491void
4492pack_kinfo(struct kinfo_file *kif)
4493{
4494
4495	kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
4496	    strlen(kif->kf_path) + 1;
4497	kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
4498}
4499
4500static void
4501export_file_to_kinfo(struct file *fp, int fd, cap_rights_t *rightsp,
4502    struct kinfo_file *kif, struct filedesc *fdp, int flags)
4503{
4504	int error;
4505
4506	bzero(kif, sizeof(*kif));
4507
4508	/* Set a default type to allow for empty fill_kinfo() methods. */
4509	kif->kf_type = KF_TYPE_UNKNOWN;
4510	kif->kf_flags = xlate_fflags(fp->f_flag);
4511	if (rightsp != NULL)
4512		kif->kf_cap_rights = *rightsp;
4513	else
4514		cap_rights_init_zero(&kif->kf_cap_rights);
4515	kif->kf_fd = fd;
4516	kif->kf_ref_count = refcount_load(&fp->f_count);
4517	kif->kf_offset = foffset_get(fp);
4518
4519	/*
4520	 * This may drop the filedesc lock, so the 'fp' cannot be
4521	 * accessed after this call.
4522	 */
4523	error = fo_fill_kinfo(fp, kif, fdp);
4524	if (error == 0)
4525		kif->kf_status |= KF_ATTR_VALID;
4526	if ((flags & KERN_FILEDESC_PACK_KINFO) != 0)
4527		pack_kinfo(kif);
4528	else
4529		kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t));
4530}
4531
4532static void
4533export_vnode_to_kinfo(struct vnode *vp, int fd, int fflags,
4534    struct kinfo_file *kif, int flags)
4535{
4536	int error;
4537
4538	bzero(kif, sizeof(*kif));
4539
4540	kif->kf_type = KF_TYPE_VNODE;
4541	error = vn_fill_kinfo_vnode(vp, kif);
4542	if (error == 0)
4543		kif->kf_status |= KF_ATTR_VALID;
4544	kif->kf_flags = xlate_fflags(fflags);
4545	cap_rights_init_zero(&kif->kf_cap_rights);
4546	kif->kf_fd = fd;
4547	kif->kf_ref_count = -1;
4548	kif->kf_offset = -1;
4549	if ((flags & KERN_FILEDESC_PACK_KINFO) != 0)
4550		pack_kinfo(kif);
4551	else
4552		kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t));
4553	vrele(vp);
4554}
4555
4556struct export_fd_buf {
4557	struct filedesc		*fdp;
4558	struct pwddesc	*pdp;
4559	struct sbuf 		*sb;
4560	ssize_t			remainder;
4561	struct kinfo_file	kif;
4562	int			flags;
4563};
4564
4565static int
4566export_kinfo_to_sb(struct export_fd_buf *efbuf)
4567{
4568	struct kinfo_file *kif;
4569
4570	kif = &efbuf->kif;
4571	if (efbuf->remainder != -1) {
4572		if (efbuf->remainder < kif->kf_structsize)
4573			return (ENOMEM);
4574		efbuf->remainder -= kif->kf_structsize;
4575	}
4576	if (sbuf_bcat(efbuf->sb, kif, kif->kf_structsize) != 0)
4577		return (sbuf_error(efbuf->sb));
4578	return (0);
4579}
4580
4581static int
4582export_file_to_sb(struct file *fp, int fd, cap_rights_t *rightsp,
4583    struct export_fd_buf *efbuf)
4584{
4585	int error;
4586
4587	if (efbuf->remainder == 0)
4588		return (ENOMEM);
4589	export_file_to_kinfo(fp, fd, rightsp, &efbuf->kif, efbuf->fdp,
4590	    efbuf->flags);
4591	FILEDESC_SUNLOCK(efbuf->fdp);
4592	error = export_kinfo_to_sb(efbuf);
4593	FILEDESC_SLOCK(efbuf->fdp);
4594	return (error);
4595}
4596
4597static int
4598export_vnode_to_sb(struct vnode *vp, int fd, int fflags,
4599    struct export_fd_buf *efbuf)
4600{
4601	int error;
4602
4603	if (efbuf->remainder == 0)
4604		return (ENOMEM);
4605	if (efbuf->pdp != NULL)
4606		PWDDESC_XUNLOCK(efbuf->pdp);
4607	export_vnode_to_kinfo(vp, fd, fflags, &efbuf->kif, efbuf->flags);
4608	error = export_kinfo_to_sb(efbuf);
4609	if (efbuf->pdp != NULL)
4610		PWDDESC_XLOCK(efbuf->pdp);
4611	return (error);
4612}
4613
4614/*
4615 * Store a process file descriptor information to sbuf.
4616 *
4617 * Takes a locked proc as argument, and returns with the proc unlocked.
4618 */
4619int
4620kern_proc_filedesc_out(struct proc *p,  struct sbuf *sb, ssize_t maxlen,
4621    int flags)
4622{
4623	struct file *fp;
4624	struct filedesc *fdp;
4625	struct pwddesc *pdp;
4626	struct export_fd_buf *efbuf;
4627	struct vnode *cttyvp, *textvp, *tracevp;
4628	struct pwd *pwd;
4629	int error, i;
4630	cap_rights_t rights;
4631
4632	PROC_LOCK_ASSERT(p, MA_OWNED);
4633
4634	/* ktrace vnode */
4635	tracevp = ktr_get_tracevp(p, true);
4636	/* text vnode */
4637	textvp = p->p_textvp;
4638	if (textvp != NULL)
4639		vrefact(textvp);
4640	/* Controlling tty. */
4641	cttyvp = NULL;
4642	if (p->p_pgrp != NULL && p->p_pgrp->pg_session != NULL) {
4643		cttyvp = p->p_pgrp->pg_session->s_ttyvp;
4644		if (cttyvp != NULL)
4645			vrefact(cttyvp);
4646	}
4647	fdp = fdhold(p);
4648	pdp = pdhold(p);
4649	PROC_UNLOCK(p);
4650
4651	efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
4652	efbuf->fdp = NULL;
4653	efbuf->pdp = NULL;
4654	efbuf->sb = sb;
4655	efbuf->remainder = maxlen;
4656	efbuf->flags = flags;
4657
4658	error = 0;
4659	if (tracevp != NULL)
4660		error = export_vnode_to_sb(tracevp, KF_FD_TYPE_TRACE,
4661		    FREAD | FWRITE, efbuf);
4662	if (error == 0 && textvp != NULL)
4663		error = export_vnode_to_sb(textvp, KF_FD_TYPE_TEXT, FREAD,
4664		    efbuf);
4665	if (error == 0 && cttyvp != NULL)
4666		error = export_vnode_to_sb(cttyvp, KF_FD_TYPE_CTTY,
4667		    FREAD | FWRITE, efbuf);
4668	if (error != 0 || pdp == NULL || fdp == NULL)
4669		goto fail;
4670	efbuf->fdp = fdp;
4671	efbuf->pdp = pdp;
4672	PWDDESC_XLOCK(pdp);
4673	pwd = pwd_hold_pwddesc(pdp);
4674	if (pwd != NULL) {
4675		/* working directory */
4676		if (pwd->pwd_cdir != NULL) {
4677			vrefact(pwd->pwd_cdir);
4678			error = export_vnode_to_sb(pwd->pwd_cdir,
4679			    KF_FD_TYPE_CWD, FREAD, efbuf);
4680		}
4681		/* root directory */
4682		if (error == 0 && pwd->pwd_rdir != NULL) {
4683			vrefact(pwd->pwd_rdir);
4684			error = export_vnode_to_sb(pwd->pwd_rdir,
4685			    KF_FD_TYPE_ROOT, FREAD, efbuf);
4686		}
4687		/* jail directory */
4688		if (error == 0 && pwd->pwd_jdir != NULL) {
4689			vrefact(pwd->pwd_jdir);
4690			error = export_vnode_to_sb(pwd->pwd_jdir,
4691			    KF_FD_TYPE_JAIL, FREAD, efbuf);
4692		}
4693	}
4694	PWDDESC_XUNLOCK(pdp);
4695	if (error != 0)
4696		goto fail;
4697	if (pwd != NULL)
4698		pwd_drop(pwd);
4699	FILEDESC_SLOCK(fdp);
4700	if (refcount_load(&fdp->fd_refcnt) == 0)
4701		goto skip;
4702	FILEDESC_FOREACH_FP(fdp, i, fp) {
4703#ifdef CAPABILITIES
4704		rights = *cap_rights(fdp, i);
4705#else /* !CAPABILITIES */
4706		rights = cap_no_rights;
4707#endif
4708		/*
4709		 * Create sysctl entry.  It is OK to drop the filedesc
4710		 * lock inside of export_file_to_sb() as we will
4711		 * re-validate and re-evaluate its properties when the
4712		 * loop continues.
4713		 */
4714		error = export_file_to_sb(fp, i, &rights, efbuf);
4715		if (error != 0 || refcount_load(&fdp->fd_refcnt) == 0)
4716			break;
4717	}
4718skip:
4719	FILEDESC_SUNLOCK(fdp);
4720fail:
4721	if (fdp != NULL)
4722		fddrop(fdp);
4723	if (pdp != NULL)
4724		pddrop(pdp);
4725	free(efbuf, M_TEMP);
4726	return (error);
4727}
4728
4729#define FILEDESC_SBUF_SIZE	(sizeof(struct kinfo_file) * 5)
4730
4731/*
4732 * Get per-process file descriptors for use by procstat(1), et al.
4733 */
4734static int
4735sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
4736{
4737	struct sbuf sb;
4738	struct proc *p;
4739	ssize_t maxlen;
4740	u_int namelen;
4741	int error, error2, *name;
4742
4743	namelen = arg2;
4744	if (namelen != 1)
4745		return (EINVAL);
4746
4747	name = (int *)arg1;
4748
4749	sbuf_new_for_sysctl(&sb, NULL, FILEDESC_SBUF_SIZE, req);
4750	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
4751	error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
4752	if (error != 0) {
4753		sbuf_delete(&sb);
4754		return (error);
4755	}
4756	maxlen = req->oldptr != NULL ? req->oldlen : -1;
4757	error = kern_proc_filedesc_out(p, &sb, maxlen,
4758	    KERN_FILEDESC_PACK_KINFO);
4759	error2 = sbuf_finish(&sb);
4760	sbuf_delete(&sb);
4761	return (error != 0 ? error : error2);
4762}
4763
4764#ifdef COMPAT_FREEBSD7
4765#ifdef KINFO_OFILE_SIZE
4766CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
4767#endif
4768
4769static void
4770kinfo_to_okinfo(struct kinfo_file *kif, struct kinfo_ofile *okif)
4771{
4772
4773	okif->kf_structsize = sizeof(*okif);
4774	okif->kf_type = kif->kf_type;
4775	okif->kf_fd = kif->kf_fd;
4776	okif->kf_ref_count = kif->kf_ref_count;
4777	okif->kf_flags = kif->kf_flags & (KF_FLAG_READ | KF_FLAG_WRITE |
4778	    KF_FLAG_APPEND | KF_FLAG_ASYNC | KF_FLAG_FSYNC | KF_FLAG_NONBLOCK |
4779	    KF_FLAG_DIRECT | KF_FLAG_HASLOCK);
4780	okif->kf_offset = kif->kf_offset;
4781	if (kif->kf_type == KF_TYPE_VNODE)
4782		okif->kf_vnode_type = kif->kf_un.kf_file.kf_file_type;
4783	else
4784		okif->kf_vnode_type = KF_VTYPE_VNON;
4785	strlcpy(okif->kf_path, kif->kf_path, sizeof(okif->kf_path));
4786	if (kif->kf_type == KF_TYPE_SOCKET) {
4787		okif->kf_sock_domain = kif->kf_un.kf_sock.kf_sock_domain0;
4788		okif->kf_sock_type = kif->kf_un.kf_sock.kf_sock_type0;
4789		okif->kf_sock_protocol = kif->kf_un.kf_sock.kf_sock_protocol0;
4790		okif->kf_sa_local = kif->kf_un.kf_sock.kf_sa_local;
4791		okif->kf_sa_peer = kif->kf_un.kf_sock.kf_sa_peer;
4792	} else {
4793		okif->kf_sa_local.ss_family = AF_UNSPEC;
4794		okif->kf_sa_peer.ss_family = AF_UNSPEC;
4795	}
4796}
4797
4798static int
4799export_vnode_for_osysctl(struct vnode *vp, int type, struct kinfo_file *kif,
4800    struct kinfo_ofile *okif, struct pwddesc *pdp, struct sysctl_req *req)
4801{
4802	int error;
4803
4804	vrefact(vp);
4805	PWDDESC_XUNLOCK(pdp);
4806	export_vnode_to_kinfo(vp, type, 0, kif, KERN_FILEDESC_PACK_KINFO);
4807	kinfo_to_okinfo(kif, okif);
4808	error = SYSCTL_OUT(req, okif, sizeof(*okif));
4809	PWDDESC_XLOCK(pdp);
4810	return (error);
4811}
4812
4813/*
4814 * Get per-process file descriptors for use by procstat(1), et al.
4815 */
4816static int
4817sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
4818{
4819	struct kinfo_ofile *okif;
4820	struct kinfo_file *kif;
4821	struct filedesc *fdp;
4822	struct pwddesc *pdp;
4823	struct pwd *pwd;
4824	u_int namelen;
4825	int error, i, *name;
4826	struct file *fp;
4827	struct proc *p;
4828
4829	namelen = arg2;
4830	if (namelen != 1)
4831		return (EINVAL);
4832
4833	name = (int *)arg1;
4834	error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
4835	if (error != 0)
4836		return (error);
4837	fdp = fdhold(p);
4838	if (fdp != NULL)
4839		pdp = pdhold(p);
4840	PROC_UNLOCK(p);
4841	if (fdp == NULL || pdp == NULL) {
4842		if (fdp != NULL)
4843			fddrop(fdp);
4844		return (ENOENT);
4845	}
4846	kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
4847	okif = malloc(sizeof(*okif), M_TEMP, M_WAITOK);
4848	PWDDESC_XLOCK(pdp);
4849	pwd = pwd_hold_pwddesc(pdp);
4850	if (pwd != NULL) {
4851		if (pwd->pwd_cdir != NULL)
4852			export_vnode_for_osysctl(pwd->pwd_cdir, KF_FD_TYPE_CWD, kif,
4853			    okif, pdp, req);
4854		if (pwd->pwd_rdir != NULL)
4855			export_vnode_for_osysctl(pwd->pwd_rdir, KF_FD_TYPE_ROOT, kif,
4856			    okif, pdp, req);
4857		if (pwd->pwd_jdir != NULL)
4858			export_vnode_for_osysctl(pwd->pwd_jdir, KF_FD_TYPE_JAIL, kif,
4859			    okif, pdp, req);
4860	}
4861	PWDDESC_XUNLOCK(pdp);
4862	if (pwd != NULL)
4863		pwd_drop(pwd);
4864	FILEDESC_SLOCK(fdp);
4865	if (refcount_load(&fdp->fd_refcnt) == 0)
4866		goto skip;
4867	FILEDESC_FOREACH_FP(fdp, i, fp) {
4868		export_file_to_kinfo(fp, i, NULL, kif, fdp,
4869		    KERN_FILEDESC_PACK_KINFO);
4870		FILEDESC_SUNLOCK(fdp);
4871		kinfo_to_okinfo(kif, okif);
4872		error = SYSCTL_OUT(req, okif, sizeof(*okif));
4873		FILEDESC_SLOCK(fdp);
4874		if (error != 0 || refcount_load(&fdp->fd_refcnt) == 0)
4875			break;
4876	}
4877skip:
4878	FILEDESC_SUNLOCK(fdp);
4879	fddrop(fdp);
4880	pddrop(pdp);
4881	free(kif, M_TEMP);
4882	free(okif, M_TEMP);
4883	return (0);
4884}
4885
4886static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc,
4887    CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_ofiledesc,
4888    "Process ofiledesc entries");
4889#endif	/* COMPAT_FREEBSD7 */
4890
4891int
4892vntype_to_kinfo(int vtype)
4893{
4894	struct {
4895		int	vtype;
4896		int	kf_vtype;
4897	} vtypes_table[] = {
4898		{ VBAD, KF_VTYPE_VBAD },
4899		{ VBLK, KF_VTYPE_VBLK },
4900		{ VCHR, KF_VTYPE_VCHR },
4901		{ VDIR, KF_VTYPE_VDIR },
4902		{ VFIFO, KF_VTYPE_VFIFO },
4903		{ VLNK, KF_VTYPE_VLNK },
4904		{ VNON, KF_VTYPE_VNON },
4905		{ VREG, KF_VTYPE_VREG },
4906		{ VSOCK, KF_VTYPE_VSOCK }
4907	};
4908	unsigned int i;
4909
4910	/*
4911	 * Perform vtype translation.
4912	 */
4913	for (i = 0; i < nitems(vtypes_table); i++)
4914		if (vtypes_table[i].vtype == vtype)
4915			return (vtypes_table[i].kf_vtype);
4916
4917	return (KF_VTYPE_UNKNOWN);
4918}
4919
4920static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc,
4921    CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_filedesc,
4922    "Process filedesc entries");
4923
4924/*
4925 * Store a process current working directory information to sbuf.
4926 *
4927 * Takes a locked proc as argument, and returns with the proc unlocked.
4928 */
4929int
4930kern_proc_cwd_out(struct proc *p,  struct sbuf *sb, ssize_t maxlen)
4931{
4932	struct pwddesc *pdp;
4933	struct pwd *pwd;
4934	struct export_fd_buf *efbuf;
4935	struct vnode *cdir;
4936	int error;
4937
4938	PROC_LOCK_ASSERT(p, MA_OWNED);
4939
4940	pdp = pdhold(p);
4941	PROC_UNLOCK(p);
4942	if (pdp == NULL)
4943		return (EINVAL);
4944
4945	efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
4946	efbuf->fdp = NULL;
4947	efbuf->pdp = pdp;
4948	efbuf->sb = sb;
4949	efbuf->remainder = maxlen;
4950	efbuf->flags = 0;
4951
4952	PWDDESC_XLOCK(pdp);
4953	pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4954	cdir = pwd->pwd_cdir;
4955	if (cdir == NULL) {
4956		error = EINVAL;
4957	} else {
4958		vrefact(cdir);
4959		error = export_vnode_to_sb(cdir, KF_FD_TYPE_CWD, FREAD, efbuf);
4960	}
4961	PWDDESC_XUNLOCK(pdp);
4962	pddrop(pdp);
4963	free(efbuf, M_TEMP);
4964	return (error);
4965}
4966
4967/*
4968 * Get per-process current working directory.
4969 */
4970static int
4971sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
4972{
4973	struct sbuf sb;
4974	struct proc *p;
4975	ssize_t maxlen;
4976	u_int namelen;
4977	int error, error2, *name;
4978
4979	namelen = arg2;
4980	if (namelen != 1)
4981		return (EINVAL);
4982
4983	name = (int *)arg1;
4984
4985	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file), req);
4986	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
4987	error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
4988	if (error != 0) {
4989		sbuf_delete(&sb);
4990		return (error);
4991	}
4992	maxlen = req->oldptr != NULL ? req->oldlen : -1;
4993	error = kern_proc_cwd_out(p, &sb, maxlen);
4994	error2 = sbuf_finish(&sb);
4995	sbuf_delete(&sb);
4996	return (error != 0 ? error : error2);
4997}
4998
4999static SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD|CTLFLAG_MPSAFE,
5000    sysctl_kern_proc_cwd, "Process current working directory");
5001
5002#ifdef DDB
5003/*
5004 * For the purposes of debugging, generate a human-readable string for the
5005 * file type.
5006 */
5007static const char *
5008file_type_to_name(short type)
5009{
5010
5011	switch (type) {
5012	case 0:
5013		return ("zero");
5014	case DTYPE_VNODE:
5015		return ("vnode");
5016	case DTYPE_SOCKET:
5017		return ("socket");
5018	case DTYPE_PIPE:
5019		return ("pipe");
5020	case DTYPE_FIFO:
5021		return ("fifo");
5022	case DTYPE_KQUEUE:
5023		return ("kqueue");
5024	case DTYPE_CRYPTO:
5025		return ("crypto");
5026	case DTYPE_MQUEUE:
5027		return ("mqueue");
5028	case DTYPE_SHM:
5029		return ("shm");
5030	case DTYPE_SEM:
5031		return ("ksem");
5032	case DTYPE_PTS:
5033		return ("pts");
5034	case DTYPE_DEV:
5035		return ("dev");
5036	case DTYPE_PROCDESC:
5037		return ("proc");
5038	case DTYPE_EVENTFD:
5039		return ("eventfd");
5040	case DTYPE_TIMERFD:
5041		return ("timerfd");
5042	default:
5043		return ("unkn");
5044	}
5045}
5046
5047/*
5048 * For the purposes of debugging, identify a process (if any, perhaps one of
5049 * many) that references the passed file in its file descriptor array. Return
5050 * NULL if none.
5051 */
5052static struct proc *
5053file_to_first_proc(struct file *fp)
5054{
5055	struct filedesc *fdp;
5056	struct proc *p;
5057	int n;
5058
5059	FOREACH_PROC_IN_SYSTEM(p) {
5060		if (p->p_state == PRS_NEW)
5061			continue;
5062		fdp = p->p_fd;
5063		if (fdp == NULL)
5064			continue;
5065		for (n = 0; n < fdp->fd_nfiles; n++) {
5066			if (fp == fdp->fd_ofiles[n].fde_file)
5067				return (p);
5068		}
5069	}
5070	return (NULL);
5071}
5072
5073static void
5074db_print_file(struct file *fp, int header)
5075{
5076#define XPTRWIDTH ((int)howmany(sizeof(void *) * NBBY, 4))
5077	struct proc *p;
5078
5079	if (header)
5080		db_printf("%*s %6s %*s %8s %4s %5s %6s %*s %5s %s\n",
5081		    XPTRWIDTH, "File", "Type", XPTRWIDTH, "Data", "Flag",
5082		    "GCFl", "Count", "MCount", XPTRWIDTH, "Vnode", "FPID",
5083		    "FCmd");
5084	p = file_to_first_proc(fp);
5085	db_printf("%*p %6s %*p %08x %04x %5d %6d %*p %5d %s\n", XPTRWIDTH,
5086	    fp, file_type_to_name(fp->f_type), XPTRWIDTH, fp->f_data,
5087	    fp->f_flag, 0, refcount_load(&fp->f_count), 0, XPTRWIDTH, fp->f_vnode,
5088	    p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
5089
5090#undef XPTRWIDTH
5091}
5092
5093DB_SHOW_COMMAND(file, db_show_file)
5094{
5095	struct file *fp;
5096
5097	if (!have_addr) {
5098		db_printf("usage: show file <addr>\n");
5099		return;
5100	}
5101	fp = (struct file *)addr;
5102	db_print_file(fp, 1);
5103}
5104
5105DB_SHOW_COMMAND_FLAGS(files, db_show_files, DB_CMD_MEMSAFE)
5106{
5107	struct filedesc *fdp;
5108	struct file *fp;
5109	struct proc *p;
5110	int header;
5111	int n;
5112
5113	header = 1;
5114	FOREACH_PROC_IN_SYSTEM(p) {
5115		if (p->p_state == PRS_NEW)
5116			continue;
5117		if ((fdp = p->p_fd) == NULL)
5118			continue;
5119		for (n = 0; n < fdp->fd_nfiles; ++n) {
5120			if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
5121				continue;
5122			db_print_file(fp, header);
5123			header = 0;
5124		}
5125	}
5126}
5127#endif
5128
5129SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc,
5130    CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
5131    &maxfilesperproc, 0, "Maximum files allowed open per process");
5132
5133SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
5134    &maxfiles, 0, "Maximum number of files");
5135
5136SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
5137    &openfiles, 0, "System-wide number of open files");
5138
5139/* ARGSUSED*/
5140static void
5141filelistinit(void *dummy)
5142{
5143
5144	file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
5145	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
5146	filedesc0_zone = uma_zcreate("filedesc0", sizeof(struct filedesc0),
5147	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
5148	pwd_zone = uma_zcreate("PWD", sizeof(struct pwd), NULL, NULL,
5149	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
5150	/*
5151	 * XXXMJG this is a temporary hack due to boot ordering issues against
5152	 * the vnode zone.
5153	 */
5154	vfs_smr = uma_zone_get_smr(pwd_zone);
5155	mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
5156}
5157SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
5158
5159/*-------------------------------------------------------------------*/
5160
5161static int
5162badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
5163    int flags, struct thread *td)
5164{
5165
5166	return (EBADF);
5167}
5168
5169static int
5170badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
5171    struct thread *td)
5172{
5173
5174	return (EINVAL);
5175}
5176
5177static int
5178badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
5179    struct thread *td)
5180{
5181
5182	return (EBADF);
5183}
5184
5185static int
5186badfo_poll(struct file *fp, int events, struct ucred *active_cred,
5187    struct thread *td)
5188{
5189
5190	return (0);
5191}
5192
5193static int
5194badfo_kqfilter(struct file *fp, struct knote *kn)
5195{
5196
5197	return (EBADF);
5198}
5199
5200static int
5201badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
5202{
5203
5204	return (EBADF);
5205}
5206
5207static int
5208badfo_close(struct file *fp, struct thread *td)
5209{
5210
5211	return (0);
5212}
5213
5214static int
5215badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
5216    struct thread *td)
5217{
5218
5219	return (EBADF);
5220}
5221
5222static int
5223badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
5224    struct thread *td)
5225{
5226
5227	return (EBADF);
5228}
5229
5230static int
5231badfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
5232    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
5233    struct thread *td)
5234{
5235
5236	return (EBADF);
5237}
5238
5239static int
5240badfo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
5241{
5242
5243	return (0);
5244}
5245
5246struct fileops badfileops = {
5247	.fo_read = badfo_readwrite,
5248	.fo_write = badfo_readwrite,
5249	.fo_truncate = badfo_truncate,
5250	.fo_ioctl = badfo_ioctl,
5251	.fo_poll = badfo_poll,
5252	.fo_kqfilter = badfo_kqfilter,
5253	.fo_stat = badfo_stat,
5254	.fo_close = badfo_close,
5255	.fo_chmod = badfo_chmod,
5256	.fo_chown = badfo_chown,
5257	.fo_sendfile = badfo_sendfile,
5258	.fo_fill_kinfo = badfo_fill_kinfo,
5259};
5260
5261static int
5262path_poll(struct file *fp, int events, struct ucred *active_cred,
5263    struct thread *td)
5264{
5265	return (POLLNVAL);
5266}
5267
5268static int
5269path_close(struct file *fp, struct thread *td)
5270{
5271	MPASS(fp->f_type == DTYPE_VNODE);
5272	fp->f_ops = &badfileops;
5273	vrele(fp->f_vnode);
5274	return (0);
5275}
5276
5277struct fileops path_fileops = {
5278	.fo_read = badfo_readwrite,
5279	.fo_write = badfo_readwrite,
5280	.fo_truncate = badfo_truncate,
5281	.fo_ioctl = badfo_ioctl,
5282	.fo_poll = path_poll,
5283	.fo_kqfilter = vn_kqfilter_opath,
5284	.fo_stat = vn_statfile,
5285	.fo_close = path_close,
5286	.fo_chmod = badfo_chmod,
5287	.fo_chown = badfo_chown,
5288	.fo_sendfile = badfo_sendfile,
5289	.fo_fill_kinfo = vn_fill_kinfo,
5290	.fo_cmp = vn_cmp,
5291	.fo_flags = DFLAG_PASSABLE,
5292};
5293
5294int
5295invfo_rdwr(struct file *fp, struct uio *uio, struct ucred *active_cred,
5296    int flags, struct thread *td)
5297{
5298
5299	return (EOPNOTSUPP);
5300}
5301
5302int
5303invfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
5304    struct thread *td)
5305{
5306
5307	return (EINVAL);
5308}
5309
5310int
5311invfo_ioctl(struct file *fp, u_long com, void *data,
5312    struct ucred *active_cred, struct thread *td)
5313{
5314
5315	return (ENOTTY);
5316}
5317
5318int
5319invfo_poll(struct file *fp, int events, struct ucred *active_cred,
5320    struct thread *td)
5321{
5322
5323	return (poll_no_poll(events));
5324}
5325
5326int
5327invfo_kqfilter(struct file *fp, struct knote *kn)
5328{
5329
5330	return (EINVAL);
5331}
5332
5333int
5334invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
5335    struct thread *td)
5336{
5337
5338	return (EINVAL);
5339}
5340
5341int
5342invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
5343    struct thread *td)
5344{
5345
5346	return (EINVAL);
5347}
5348
5349int
5350invfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
5351    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
5352    struct thread *td)
5353{
5354
5355	return (EINVAL);
5356}
5357
5358/*-------------------------------------------------------------------*/
5359
5360/*
5361 * File Descriptor pseudo-device driver (/dev/fd/).
5362 *
5363 * Opening minor device N dup()s the file (if any) connected to file
5364 * descriptor N belonging to the calling process.  Note that this driver
5365 * consists of only the ``open()'' routine, because all subsequent
5366 * references to this file will be direct to the other driver.
5367 *
5368 * XXX: we could give this one a cloning event handler if necessary.
5369 */
5370
5371/* ARGSUSED */
5372static int
5373fdopen(struct cdev *dev, int mode, int type, struct thread *td)
5374{
5375
5376	/*
5377	 * XXX Kludge: set curthread->td_dupfd to contain the value of the
5378	 * the file descriptor being sought for duplication. The error
5379	 * return ensures that the vnode for this device will be released
5380	 * by vn_open. Open will detect this special error and take the
5381	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
5382	 * will simply report the error.
5383	 */
5384	td->td_dupfd = dev2unit(dev);
5385	return (ENODEV);
5386}
5387
5388static struct cdevsw fildesc_cdevsw = {
5389	.d_version =	D_VERSION,
5390	.d_open =	fdopen,
5391	.d_name =	"FD",
5392};
5393
5394static void
5395fildesc_drvinit(void *unused)
5396{
5397	struct cdev *dev;
5398
5399	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
5400	    UID_ROOT, GID_WHEEL, 0666, "fd/0");
5401	make_dev_alias(dev, "stdin");
5402	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
5403	    UID_ROOT, GID_WHEEL, 0666, "fd/1");
5404	make_dev_alias(dev, "stdout");
5405	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
5406	    UID_ROOT, GID_WHEEL, 0666, "fd/2");
5407	make_dev_alias(dev, "stderr");
5408}
5409
5410SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);
5411