/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/eventfd.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/specialfd.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

/*
 * The following macro defines how many bytes will be allocated on the
 * stack, instead of from the heap, when copying ioctl data structures
 * between userspace and the kernel.  Some ioctls with small data
 * structures are used very frequently, and this small on-stack buffer
 * gives a significant speedup for those requests.  The value of this
 * define should be greater than or equal to 64 bytes and should also
 * be a power of two.  The data structure is currently hard-aligned to
 * an 8-byte boundary on the stack, which should be sufficient for all
 * supported platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
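
/*
 * Illustration (not code from this file): an ioctl whose argument is a
 * plain int, such as FIONBIO, has IOCPARM_LEN() well below
 * SYS_IOCTL_SMALL_SIZE, so sys_ioctl() below serves it entirely from
 * its on-stack buffer and never calls malloc()/free().
 */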

#ifdef __LP64__
static int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
static int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
#endif

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};

MALLOC_DEFINE(M_SELFD, "selfd", "selfd");
static struct mtx_pool *mtxpool_select;

#ifdef __LP64__
size_t
devfs_iosize_max(void)
{

	return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}

size_t
iosize_max(void)
{

	return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}
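
/*
 * Userspace view of sys_read() (illustrative):
 *
 *	char buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * The buf/nbyte pair is wrapped in a single-element iovec above and
 * handed to kern_readv(), so read(2) is just the one-segment case of
 * readv(2).
 */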

/*
 * Positioned read system call
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	freeuio(auio);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_read_rights, &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	freeuio(auio);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_pread_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}
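
/*
 * Note on the accounting in dofileread() (restating the logic above):
 * if fo_read() is interrupted after moving some bytes, the error is
 * dropped and the short count is returned, so callers may see read(2)
 * legitimately return fewer bytes than requested.
 */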

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
    off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	freeuio(auio);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_write_rights, &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	freeuio(auio);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);
	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	error = fo_write(fp, auio, td->td_ucred, flags, td);
	/*
	 * Socket layer is responsible for special error handling,
	 * see sousrsend().
	 */
	if (error != 0 && fp->f_type != DTYPE_SOCKET) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		if (error == 0)
			ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}
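
/*
 * Userspace consequence of the EPIPE handling above (illustrative):
 * writing to a pipe whose read end has been closed fails with EPIPE
 * and also raises SIGPIPE; a process that ignores or catches SIGPIPE
 * sees write(2) return -1 with errno set to EPIPE instead of dying.
 */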

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since we must return EINVAL and not
 * EBADF if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, &cap_ftruncate_rights, &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	uint32_t com;
	int arg, error;
	u_int size;
	caddr_t data;

#ifdef INVARIANTS
	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
	}
#endif
	com = (uint32_t)uap->com;

	/*
	 * Interpret the high-order word to find the amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}
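
/*
 * Command-word layout (sketch, per <sys/ioccom.h>): the high-order
 * word of 'com' encodes the direction bits and the IOCPARM_LEN() used
 * above.  For example, FIONREAD is defined as _IOR('f', 127, int):
 * IOC_OUT is set and the length is sizeof(int), so sys_ioctl() zeroes
 * an int-sized buffer, calls kern_ioctl(), and copies the result back
 * out to uap->data.
 */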

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_noref(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	if (!fhold(fp)) {
		error = EBADF;
		fp = NULL;
		goto out;
	}
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, &cap_ioctl_rights, &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
sys_posix_fallocate(struct thread *td, struct posix_fallocate_args *uap)
{
	int error;

	error = kern_posix_fallocate(td, uap->fd, uap->offset, uap->len);
	return (kern_posix_error(td, error));
}

int
kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (offset < 0 || len <= 0)
		return (EINVAL);
	/* Check for wrap. */
	if (offset > OFF_MAX - len)
		return (EFBIG);
	error = fget(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fallocate(fp, offset, len, td);
 out:
	fdrop(fp, td);
	return (error);
}

int
sys_fspacectl(struct thread *td, struct fspacectl_args *uap)
{
	struct spacectl_range rqsr, rmsr;
	int error, cerror;

	error = copyin(uap->rqsr, &rqsr, sizeof(rqsr));
	if (error != 0)
		return (error);

	error = kern_fspacectl(td, uap->fd, uap->cmd, &rqsr, uap->flags,
	    &rmsr);
	if (uap->rmsr != NULL) {
		cerror = copyout(&rmsr, uap->rmsr, sizeof(rmsr));
		if (error == 0)
			error = cerror;
	}
	return (error);
}

int
kern_fspacectl(struct thread *td, int fd, int cmd,
    const struct spacectl_range *rqsr, int flags, struct spacectl_range *rmsrp)
{
	struct file *fp;
	struct spacectl_range rmsr;
	int error;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(cmd);
	AUDIT_ARG_FFLAGS(flags);

	if (rqsr == NULL)
		return (EINVAL);
	rmsr = *rqsr;
	if (rmsrp != NULL)
		*rmsrp = rmsr;

	if (cmd != SPACECTL_DEALLOC ||
	    rqsr->r_offset < 0 || rqsr->r_len <= 0 ||
	    rqsr->r_offset > OFF_MAX - rqsr->r_len ||
	    (flags & ~SPACECTL_F_SUPPORTED) != 0)
		return (EINVAL);

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fspacectl(fp, cmd, &rmsr.r_offset, &rmsr.r_len, flags,
	    td->td_ucred, td);
	/* fspacectl is not restarted after signals if the file is modified. */
	if (rmsr.r_len != rqsr->r_len && (error == ERESTART ||
	    error == EINTR || error == EWOULDBLOCK))
		error = 0;
	if (rmsrp != NULL)
		*rmsrp = rmsr;
out:
	fdrop(fp, td);
	return (error);
}

int
kern_specialfd(struct thread *td, int type, void *arg)
{
	struct file *fp;
	struct specialfd_eventfd *ae;
	int error, fd, fflags;

	fflags = 0;
	error = falloc_noinstall(td, &fp);
	if (error != 0)
		return (error);

	switch (type) {
	case SPECIALFD_EVENTFD:
		ae = arg;
		if ((ae->flags & EFD_CLOEXEC) != 0)
			fflags |= O_CLOEXEC;
		error = eventfd_create_file(td, fp, ae->initval, ae->flags);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == 0)
		error = finstall(td, fp, &fd, fflags, NULL);
	fdrop(fp, td);
	if (error == 0)
		td->td_retval[0] = fd;
	return (error);
}

int
sys___specialfd(struct thread *td, struct __specialfd_args *args)
{
	struct specialfd_eventfd ae;
	int error;

	switch (args->type) {
	case SPECIALFD_EVENTFD:
		if (args->len != sizeof(struct specialfd_eventfd)) {
			error = EINVAL;
			break;
		}
		error = copyin(args->req, &ae, sizeof(ae));
		if (error != 0)
			break;
		if ((ae.flags & ~(EFD_CLOEXEC | EFD_NONBLOCK |
		    EFD_SEMAPHORE)) != 0) {
			error = EINVAL;
			break;
		}
		error = kern_specialfd(td, args->type, &ae);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring the
		 * old sigmask.
		 */
		ast_sched(td, TDA_SIGSUSPEND);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when the user specified n greater than the
 * last open file descriptor, check that no bits are set after the
 * last valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_nfiles.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0; /* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}
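
/*
 * Example of the rule above (illustrative): with a descriptor table of
 * fd_nfiles == 20 slots, a call like select(25, ...) that has a bit
 * set for descriptor 22 must fail with EBADF, even though 22 lies
 * beyond every possible open file.
 */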

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_nfiles;
	if (nd > lf)
		nd = lf;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			if (ncpbytes != ncpubytes)			\
				bzero((char *)ibits[x] + ncpubytes,	\
				    ncpbytes - ncpubytes);		\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation. This should be more
	 * generic.
	 */
#define swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}
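
/*
 * Userspace view of kern_select() (illustrative):
 *
 *	fd_set rset;
 *	struct timeval tv = { 2, 500000 };	2.5 second timeout
 *	FD_ZERO(&rset);
 *	FD_SET(sock, &rset);
 *	if (select(sock + 1, &rset, NULL, NULL, &tv) > 0 &&
 *	    FD_ISSET(sock, &rset))
 *		... sock is readable ...
 */
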
/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static const int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0))
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			if (only_user)
				error = fget_only_user(fdp, fd,
				    &cap_event_rights, &fp);
			else
				error = fget_unlocked(td, fd,
				    &cap_event_rights, &fp);
			if (__predict_false(error != 0))
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			if (only_user)
				fput_only_user(fdp, fp);
			else
				fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}
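
/*
 * Conversion example for sys_poll() above (illustrative): a timeout of
 * 1500 ms becomes ts = { .tv_sec = 1, .tv_nsec = 500000000 }, while
 * INFTIM (-1) passes tsp == NULL to request an unbounded wait.
 */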

/*
 * kfds points to an array in the kernel.
 */
int
kern_poll_kfds(struct thread *td, struct pollfd *kfds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
	sbintime_t sbt, precision, tmp;
	time_t over;
	struct timespec ts;
	int error;

	precision = 0;
	if (tsp != NULL) {
		if (!timespecvalid_interval(tsp))
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			sbt = 0;
		else {
			ts = *tsp;
			if (ts.tv_sec > INT32_MAX / 2) {
				over = ts.tv_sec - INT32_MAX / 2;
				ts.tv_sec -= over;
			} else
				over = 0;
			tmp = tstosbt(ts);
			precision = tmp;
			precision >>= tc_precexp;
			if (TIMESEL(&sbt, tmp))
				sbt += tc_tick_sbt;
			sbt += tmp;
		}
	} else
		sbt = -1;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring the
		 * old sigmask.
		 */
		ast_sched(td, TDA_SIGSUSPEND);
	}

	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, kfds, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, sbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
	struct timespec ts, *tsp;
	sigset_t set, *ssp;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;
	if (uap->set != NULL) {
		error = copyin(uap->set, &set, sizeof(set));
		if (error)
			return (error);
		ssp = &set;
	} else
		ssp = NULL;
	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}

/*
 * ufds points to an array in user space.
 */
int
kern_poll(struct thread *td, struct pollfd *ufds, u_int nfds,
    struct timespec *tsp, sigset_t *set)
{
	struct pollfd *kfds;
	struct pollfd stackfds[32];
	int error;

	if (kern_poll_maxfds(nfds))
		return (EINVAL);
	if (nfds > nitems(stackfds))
		kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
	else
		kfds = stackfds;
	error = copyin(ufds, kfds, nfds * sizeof(*kfds));
	if (error != 0)
		goto out;

	error = kern_poll_kfds(td, kfds, nfds, tsp, set);
	if (error == 0)
		error = pollout(td, kfds, ufds, nfds);
#ifdef KTRACE
	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray("pollfd", UIO_USERSPACE, ufds, nfds,
		    sizeof(*ufds));
#endif

out:
	if (nfds > nitems(stackfds))
		free(kfds, M_TEMP);
	return (error);
}

bool
kern_poll_maxfds(u_int nfds)
{

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the system-wide limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	return (nfds > maxfilesperproc && nfds > FD_SETSIZE);
}
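
/*
 * Example (illustrative): nfds is rejected only when it exceeds both
 * maxfilesperproc and FD_SETSIZE, so a caller may always poll at least
 * FD_SETSIZE (1024 by default) entries regardless of the per-process
 * file limit.
 */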

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
	int n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd->fd, &cap_event_rights,
			    &fp);
		else
			error = fget_unlocked(td, fd->fd, &cap_event_rights,
			    &fp);
		if (__predict_false(error != 0)) {
			fd->revents = POLLNVAL;
			n++;
			continue;
		}
		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (fd->revents != 0)
			n++;
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	int i, n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
			continue;
		}
		if (only_user)
			error = fget_only_user(fdp, fds->fd,
			    &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fds->fd, &cap_event_rights,
			    &fp);
		if (__predict_false(error != 0)) {
			fds->revents = POLLNVAL;
			n++;
			continue;
		}
		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		selfdalloc(td, fds);
		fds->revents = fo_poll(fp, fds->events,
		    td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		/*
		 * POSIX requires that POLLOUT never be set
		 * simultaneously with POLLHUP.
		 */
		if ((fds->revents & POLLHUP) != 0)
			fds->revents &= ~POLLOUT;

		if (fds->revents != 0)
			n++;
	}
	td->td_retval[0] = n;
	return (0);
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* stupid gcc! */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		if (sopoll(so, events, NULL, td) != 0) {
			error = 0;
			break;
		}
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = malloc(sizeof(*stp->st_free1), M_SELFD,
		    M_WAITOK | M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = malloc(sizeof(*stp->st_free2), M_SELFD,
		    M_WAITOK | M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	/*
	 * Paired with doselwakeup.
	 */
	if (atomic_load_acq_ptr((uintptr_t *)&sfp->sf_si) != (uintptr_t)NULL) {
		mtx_lock(sfp->sf_mtx);
		if (sfp->sf_si != NULL) {
			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
		}
		mtx_unlock(sfp->sf_mtx);
	}
	free(sfp, M_SELFD);
}

/*
 * Drain the waiters tied to all the selfds belonging to the specified
 * selinfo.
 */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This feature is already provided by doselwakeup(), thus it is
	 * enough to call it here.  Eventually, the caller should take
	 * care to avoid races between a thread calling select()/poll()
	 * and file descriptor detaching, but, again, the races are just
	 * the same as for selwakeup().
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}
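
/*
 * Driver-side usage sketch (assumed convention, not code from this
 * file; foo_softc, sc_rsel and sc_bytes are hypothetical): an fo_poll
 * method reports ready events immediately and calls selrecord() only
 * when it has nothing to report:
 *
 *	static int
 *	foo_poll(struct file *fp, int events, struct ucred *cred,
 *	    struct thread *td)
 *	{
 *		struct foo_softc *sc = fp->f_data;
 *		int revents = 0;
 *
 *		mtx_lock(&sc->sc_mtx);
 *		if (sc->sc_bytes > 0)
 *			revents = events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(td, &sc->sc_rsel);
 *		mtx_unlock(&sc->sc_mtx);
 *		return (revents);
 *	}
 *
 * The matching producer calls selwakeup(&sc->sc_rsel) when data
 * arrives, which lands in doselwakeup() below.
 */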

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{
	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{
	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear the
		 * sf_si, seltdclear will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
		/*
		 * Paired with selfdfree.
		 *
		 * Storing this only after the wakeup provides an invariant that
		 * stp is not used after selfdfree returns.
		 */
		atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si, (uintptr_t)NULL);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp != NULL) {
		MPASS(stp->st_flags == 0);
		MPASS(STAILQ_EMPTY(&stp->st_selq));
		return;
	}
	stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
	td->td_sel = stp;
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	MPASS(stp->st_flags == 0);
	MPASS(STAILQ_EMPTY(&stp->st_selq));
	if (stp->st_free1)
		free(stp->st_free1, M_SELFD);
	if (stp->st_free2)
		free(stp->st_free2, M_SELFD);
	td->td_sel = NULL;
	cv_destroy(&stp->st_wait);
	mtx_destroy(&stp->st_mtx);
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
static void
selectinit(void *dummy __unused)
{

	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}

/*
 * Set up a syscall return value that follows the convention specified for
 * posix_* functions.
 */
int
kern_posix_error(struct thread *td, int error)
{

	if (error <= 0)
		return (error);
	td->td_errno = error;
	td->td_pflags |= TDP_NERRNO;
	td->td_retval[0] = error;
	return (0);
}
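
/*
 * Example of the convention above (illustrative): sys_posix_fallocate()
 * routes its result through kern_posix_error(), so a failure such as
 * EINVAL is returned directly as the syscall's value rather than via
 * errno, matching the POSIX "returns an error number" specification
 * for the posix_* family.
 */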

int
kcmp_cmp(uintptr_t a, uintptr_t b)
{
	if (a == b)
		return (0);
	else if (a < b)
		return (1);
	return (2);
}
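
/*
 * Note on the encoding above (restating the contract): kcmp_cmp()
 * returns 0 for equal handles and 1/2 for an otherwise opaque
 * ordering, letting userspace compare kernel objects without learning
 * their addresses; file_kcmp_generic() below additionally returns 3
 * when the two files are of different types and thus not comparable.
 */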

static int
kcmp_pget(struct thread *td, pid_t pid, struct proc **pp)
{
	int error;

	if (pid == td->td_proc->p_pid) {
		*pp = td->td_proc;
		return (0);
	}
	error = pget(pid, PGET_NOTID | PGET_CANDEBUG | PGET_NOTWEXIT |
	    PGET_HOLD, pp);
	MPASS(*pp != td->td_proc);
	return (error);
}

int
kern_kcmp(struct thread *td, pid_t pid1, pid_t pid2, int type,
    uintptr_t idx1, uintptr_t idx2)
{
	struct proc *p1, *p2;
	struct file *fp1, *fp2;
	int error, res;

	res = -1;
	p1 = p2 = NULL;
	error = kcmp_pget(td, pid1, &p1);
	if (error == 0)
		error = kcmp_pget(td, pid2, &p2);
	if (error != 0)
		goto out;

	switch (type) {
	case KCMP_FILE:
	case KCMP_FILEOBJ:
		error = fget_remote(td, p1, idx1, &fp1);
		if (error == 0) {
			error = fget_remote(td, p2, idx2, &fp2);
			if (error == 0) {
				if (type == KCMP_FILEOBJ)
					res = fo_cmp(fp1, fp2, td);
				else
					res = kcmp_cmp((uintptr_t)fp1,
					    (uintptr_t)fp2);
				fdrop(fp2, td);
			}
			fdrop(fp1, td);
		}
		break;
	case KCMP_FILES:
		res = kcmp_cmp((uintptr_t)p1->p_fd, (uintptr_t)p2->p_fd);
		break;
	case KCMP_SIGHAND:
		res = kcmp_cmp((uintptr_t)p1->p_sigacts,
		    (uintptr_t)p2->p_sigacts);
		break;
	case KCMP_VM:
		res = kcmp_cmp((uintptr_t)p1->p_vmspace,
		    (uintptr_t)p2->p_vmspace);
		break;
	default:
		error = EINVAL;
		break;
	}

out:
	if (p1 != NULL && p1 != td->td_proc)
		PRELE(p1);
	if (p2 != NULL && p2 != td->td_proc)
		PRELE(p2);

	td->td_retval[0] = res;
	return (error);
}

int
sys_kcmp(struct thread *td, struct kcmp_args *uap)
{
	return (kern_kcmp(td, uap->pid1, uap->pid2, uap->type,
	    uap->idx1, uap->idx2));
}

int
file_kcmp_generic(struct file *fp1, struct file *fp2, struct thread *td)
{
	if (fp1->f_type != fp2->f_type)
		return (3);
	return (kcmp_cmp((uintptr_t)fp1->f_data, (uintptr_t)fp2->f_data));
}