kern_event.c revision 280258
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_event.c 280258 2015-03-19 13:37:36Z rwatson $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/stdatomic.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)

TASKQUEUE_DEFINE_THREAD(kqueue);

static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
		    struct thread *td, int waitok);
static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void	kqueue_release(struct kqueue *kq, int locked);
static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
		    uintptr_t ident, int waitok);
static void	kqueue_task(void *arg, int pending);
static int	kqueue_scan(struct kqueue *kq, int maxevents,
		    struct kevent_copyops *k_ops,
		    const struct timespec *timeout,
		    struct kevent *keva, struct thread *td);
static void 	kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void	kqueue_fo_release(int filt);

static fo_rdwr_t	kqueue_read;
static fo_rdwr_t	kqueue_write;
static fo_truncate_t	kqueue_truncate;
static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;

static struct fileops kqueueops = {
	.fo_read = kqueue_read,
	.fo_write = kqueue_write,
	.fo_truncate = kqueue_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
};

static int 	knote_attach(struct knote *kn, struct kqueue *kq);
static void 	knote_drop(struct knote *kn, struct thread *td);
static void 	knote_enqueue(struct knote *kn);
static void 	knote_dequeue(struct knote *kn);
static void 	knote_init(void);
static struct 	knote *knote_alloc(int waitok);
static void 	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);
static void	filt_timerexpire(void *knx);
static int	filt_timerattach(struct knote *kn);
static void	filt_timerdetach(struct knote *kn);
static int	filt_timer(struct knote *kn, long hint);
static int	filt_userattach(struct knote *kn);
static void	filt_userdetach(struct knote *kn);
static int	filt_user(struct knote *kn, long hint);
static void	filt_usertouch(struct knote *kn, struct kevent *kev,
		    u_long type);

static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c?  */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
};
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};

static uma_zone_t	knote_zone;
static atomic_uint	kq_ncallouts = ATOMIC_VAR_INIT(0);
static unsigned int 	kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do { 				\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while (0)
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
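/*
 * A knote being modified or dropped without the kq lock held is marked
 * KN_INFLUX; any thread that stumbles on such a knote sets KQ_FLUXWAIT
 * and sleeps on the kqueue until the owner clears KN_INFLUX and calls
 * KQ_FLUX_WAKEUP() (usually via KQ_UNLOCK_FLUX()).
 */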
#define KQ_FLUX_WAKEUP(kq) do {						\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
		wakeup((kq));						\
	}								\
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {						\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)
#define KN_LIST_LOCK(kn) do {						\
	if (kn->kn_knlist != NULL)					\
		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define KN_LIST_UNLOCK(kn) do {						\
	if (kn->kn_knlist != NULL) 					\
		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
} while (0)
#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
	if (islocked)							\
		KNL_ASSERT_LOCKED(knl);				\
	else								\
		KNL_ASSERT_UNLOCKED(knl);				\
} while (0)
#ifdef INVARIANTS
#define	KNL_ASSERT_LOCKED(knl) do {					\
	knl->kl_assert_locked((knl)->kl_lockarg);			\
} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {					\
	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
} while (0)
#else /* !INVARIANTS */
#define	KNL_ASSERT_LOCKED(knl) do {} while (0)
#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

static int
filt_nullattach(struct knote *kn)
{

	return (ENXIO);
};

struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
	MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops },			/* EVFILT_READ */
	{ &file_filtops },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops },			/* EVFILT_VNODE */
	{ &proc_filtops },			/* EVFILT_PROC */
	{ &sig_filtops },			/* EVFILT_SIGNAL */
	{ &timer_filtops },			/* EVFILT_TIMER */
	{ &null_filtops },			/* former EVFILT_NETDEV */
	{ &fs_filtops },			/* EVFILT_FS */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops },			/* EVFILT_USER */
};
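
/*
 * System filter identifiers are negative (EVFILT_READ is -1, EVFILT_WRITE
 * is -2, and so on), so the accessors below use ~filt to map them onto
 * the table indices 0..EVFILT_SYSCOUNT-1.
 */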

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);

	kn->kn_status |= KN_KQUEUE;
	kn->kn_fop = &kqread_filtops;
	knlist_add(&kq->kq_sel.si_note, kn, 0);

	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c?  */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int immediate;
	int error;

	immediate = 0;
	p = pfind(kn->kn_id);
	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
		p = zpfind(kn->kn_id);
		immediate = 1;
	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
		immediate = 1;
	}

	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	if (immediate == 0)
		knlist_add(&p->p_klist, kn, 1);

	/*
	 * Immediately activate any exit notes if the target process is a
	 * zombie.  This is necessary to handle the case where the target
	 * process, e.g. a child, dies before the kevent is registered.
	 */
	if (immediate && filt_proc(kn, NOTE_EXIT))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c?  */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	p = kn->kn_ptr.p_proc;
	knlist_remove(&p->p_klist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c?  */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p = kn->kn_ptr.p_proc;
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		if (!(kn->kn_status & KN_DETACHED))
			knlist_remove_inevent(&p->p_klist, kn);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = p->p_xstat;
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  It mostly does the same as knote(),
 * activating all knotes registered to be activated when the process
 * forks.  Additionally, for each knote attached to the parent, check
 * whether the user wants to track the new process.  If so, attach a
 * new knote to the child, and immediately report an event with the
 * child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
			continue;
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case.  In addition to the activation
		 * of the event, we need to register a new knote to
		 * track the child.  Drop the locks in preparation for
		 * the call to kqueue_register().
		 */
		kn->kn_status |= KN_INFLUX;
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register a knote with
		 * new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK))
			KNOTE_ACTIVATE(kn, 0);
		KQ_LOCK(kq);
		kn->kn_status &= ~KN_INFLUX;
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */

#define NOTE_TIMER_PRECMASK	(NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \
				NOTE_NSECONDS)

static __inline sbintime_t
timer2sbintime(intptr_t data, int flags)
{
	sbintime_t modifier;

	switch (flags & NOTE_TIMER_PRECMASK) {
	case NOTE_SECONDS:
		modifier = SBT_1S;
		break;
	case NOTE_MSECONDS: /* FALLTHROUGH */
	case 0:
		modifier = SBT_1MS;
		break;
	case NOTE_USECONDS:
		modifier = SBT_1US;
		break;
	case NOTE_NSECONDS:
		modifier = SBT_1NS;
		break;
	default:
		return (-1);
	}

#ifdef __LP64__
	if (data > SBT_MAX / modifier)
		return (SBT_MAX);
#endif
	return (modifier * data);
}
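
/*
 * For example, kn_sdata == 500 with NOTE_MSECONDS in kn_sfflags yields
 * 500 * SBT_1MS; with no precision flag set, the historical default of
 * milliseconds applies.
 */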

static void
filt_timerexpire(void *knx)
{
	struct callout *calloutp;
	struct knote *kn;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
		calloutp = (struct callout *)kn->kn_hook;
		*kn->kn_ptr.p_nexttime += timer2sbintime(kn->kn_sdata,
		    kn->kn_sfflags);
		callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
		    filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);
	}
}

/*
 * data contains the amount of time to sleep
 */
static int
filt_timerattach(struct knote *kn)
{
	struct callout *calloutp;
	sbintime_t to;
	unsigned int ncallouts;

	if ((intptr_t)kn->kn_sdata < 0)
		return (EINVAL);
	if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/* Only precision units are supported in flags so far */
	if (kn->kn_sfflags & ~NOTE_TIMER_PRECMASK)
		return (EINVAL);

	to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if (to < 0)
		return (EINVAL);

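	/*
	 * Reserve a callout slot against kq_calloutmax with a lock-free
	 * CAS loop; atomic_compare_exchange_weak_explicit() reloads
	 * ncallouts on failure, so the limit is rechecked on every retry.
	 */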
	ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
	do {
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
	    &ncallouts, ncallouts + 1, memory_order_relaxed,
	    memory_order_relaxed));

	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_nexttime = malloc(sizeof(sbintime_t), M_KQUEUE, M_WAITOK);
	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
	callout_init(calloutp, CALLOUT_MPSAFE);
	kn->kn_hook = calloutp;
	*kn->kn_ptr.p_nexttime = to + sbinuptime();
	callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
	    filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);

	return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
	struct callout *calloutp;
	unsigned int old;

	calloutp = (struct callout *)kn->kn_hook;
	callout_drain(calloutp);
	free(calloutp, M_KQUEUE);
	free(kn->kn_ptr.p_nexttime, M_KQUEUE);
	old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

	return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
	kn->kn_hook = NULL;
	if (kn->kn_fflags & NOTE_TRIGGER)
		kn->kn_hookid = 1;
	else
		kn->kn_hookid = 0;
	return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

	/*
	 * EVFILT_USER knotes are not attached to anything in the kernel.
	 */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

	return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
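
/*
 * Userland view of the above (an illustrative sketch only, not part of
 * this file): register the event, then fire it with NOTE_TRIGGER, which
 * sets kn_hookid and makes filt_user() report the event.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */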

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	fdp = td->td_proc->p_fd;
	error = falloc(td, &fp, &fd, 0);
	if (error)
		goto done2;

	/* An extra reference on `fp' has been held for us by falloc(). */
	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	kq->kq_fdp = fdp;
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);

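	/*
	 * Link the new kqueue into the fd table's list so that
	 * knote_fdclose() can find it when a descriptor is closed.
	 */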
	FILEDESC_XLOCK(fdp);
	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
	FILEDESC_XUNLOCK(fdp);

	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
	fdrop(fp, td);

	td->td_retval[0] = fd;
done2:
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	struct timespec ts, *tsp;
	struct kevent_copyops k_ops = { uap,
					kevent_copyout,
					kevent_copyin};
	int error;
#ifdef KTRACE
	struct uio ktruio;
	struct iovec ktriov;
	struct uio *ktruioin = NULL;
	struct uio *ktruioout = NULL;
#endif

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO)) {
		ktriov.iov_base = uap->changelist;
		ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
		    .uio_td = td };
		ktruioin = cloneuio(&ktruio);
		ktriov.iov_base = uap->eventlist;
		ktriov.iov_len = uap->nevents * sizeof(struct kevent);
		ktruioout = cloneuio(&ktruio);
	}
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    &k_ops, tsp);

#ifdef KTRACE
	if (ktruioin != NULL) {
		ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
		ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
	}
#endif

	return (error);
}
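
/*
 * The syscall above serves the usual userland sequence (an illustrative
 * sketch only; 'fd' stands for any descriptor of interest):
 *
 *	struct kevent change, event;
 *	int kq = kqueue();
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &change, 1, &event, 1, NULL);
 *
 * On return, n is the number of triggered events copied out, i.e. the
 * td->td_retval[0] value set by kqueue_scan().
 */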

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;
	return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;
	return (error);
}
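
/*
 * kevent_copyout() and kevent_copyin() are the user-space instances of
 * struct kevent_copyops.  kern_kevent() below only ever goes through
 * k_ops, so other consumers (e.g. ABI compatibility layers) can supply
 * their own copy routines that translate foreign kevent layouts.
 */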

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];
	struct kevent *kevp, *changes;
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int i, n, nerrors, error;

	cap_rights_init(&rights);
	if (nchanges > 0)
		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		goto done_norel;

	nerrors = 0;

	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			goto done;
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				if (nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) k_ops->k_copyout(k_ops->arg,
					    kevp, 1);
					nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		nchanges -= n;
	}
	if (nerrors) {
		td->td_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
done:
	kqueue_release(kq, 0);
done_norel:
	fdrop(fp, td);
	return (error);
}

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
		printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
		    ~filt, EVFILT_SYSCOUNT);
		return EINVAL;
	}
	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
	    sysfilt_ops[~filt].for_fop != NULL)
		error = EEXIST;
	else {
		sysfilt_ops[~filt].for_fop = filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return (error);
}

int
kqueue_del_filteropts(int filt)
{
	int error;

	error = 0;
	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return EINVAL;

	mtx_lock(&filterops_lock);
	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
	    sysfilt_ops[~filt].for_fop == NULL)
		error = EINVAL;
	else if (sysfilt_ops[~filt].for_refcnt != 0)
		error = EBUSY;
	else {
		sysfilt_ops[~filt].for_fop = &null_filtops;
		sysfilt_ops[~filt].for_refcnt = 0;
	}
	mtx_unlock(&filterops_lock);

	return error;
}

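/*
 * kqueue_fo_find() and kqueue_fo_release() bracket every use of a filter
 * type: for_refcnt makes kqueue_del_filteropts() fail with EBUSY while
 * knotes still reference the filterops, so a filter in use cannot be
 * torn down.
 */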
static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}

/*
 * A ref to kq (obtained via kqueue_acquire) must be held.  waitok will
 * influence whether memory allocation may wait.  Make sure it is 0 if you
 * hold any mutexes.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
	struct filterops *fops;
	struct file *fp;
	struct knote *kn, *tkn;
	cap_rights_t rights;
	int error, filt, event;
	int haskqglobal, filedesc_unlock;

	fp = NULL;
	kn = NULL;
	error = 0;
	haskqglobal = 0;
	filedesc_unlock = 0;

	filt = kev->filter;
	fops = kqueue_fo_find(filt);
	if (fops == NULL)
		return EINVAL;

	tkn = knote_alloc(waitok);		/* prevent waiting with locks */

findkn:
	if (fops->f_isfd) {
		KASSERT(td != NULL, ("td is NULL"));
		error = fget(td, kev->ident,
		    cap_rights_init(&rights, CAP_EVENT), &fp);
		if (error)
			goto done;

		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
		    kev->ident, 0) != 0) {
			/* try again */
			fdrop(fp, td);
			fp = NULL;
			error = kqueue_expand(kq, fops, kev->ident, waitok);
			if (error)
				goto done;
			goto findkn;
		}

		if (fp->f_type == DTYPE_KQUEUE) {
			/*
			 * If we add some intelligence about what we are doing,
			 * we should be able to support events on ourselves.
			 * We need to know when we are doing this to prevent
			 * getting both the knlist lock and the kq lock since
			 * they are the same thing.
			 */
			if (fp->f_data == kq) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Pre-lock the filedesc before the global
			 * lock mutex, see the comment in
			 * kqueue_close().
			 */
			FILEDESC_XLOCK(td->td_proc->p_fd);
			filedesc_unlock = 1;
			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
		}

		KQ_LOCK(kq);
		if (kev->ident < kq->kq_knlistsize) {
			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
				if (kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if ((kev->flags & EV_ADD) == EV_ADD)
			kqueue_expand(kq, fops, kev->ident, waitok);

		KQ_LOCK(kq);
		if (kq->kq_knhashmask != 0) {
			struct klist *list;

			list = &kq->kq_knhash[
			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	/* knote is in the process of changing, wait for it to stabilize. */
	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
		if (filedesc_unlock) {
			FILEDESC_XUNLOCK(td->td_proc->p_fd);
			filedesc_unlock = 0;
		}
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
		if (fp != NULL) {
			fdrop(fp, td);
			fp = NULL;
		}
		goto findkn;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kn == NULL) {
		if (kev->flags & EV_ADD) {
			kn = tkn;
			tkn = NULL;
			if (kn == NULL) {
				KQ_UNLOCK(kq);
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;
			/*
			 * apply reference counts to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fops = NULL;
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;
			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
			    EV_ENABLE | EV_DISABLE);
			kn->kn_status = KN_INFLUX|KN_DETACHED;

			error = knote_attach(kn, kq);
			KQ_UNLOCK(kq);
			if (error != 0) {
				tkn = kn;
				goto done;
			}

			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
				knote_drop(kn, td);
				goto done;
			}
			KN_LIST_LOCK(kn);
			goto done_ev_add;
		} else {
			/* No matching knote and the EV_ADD flag is not set. */
			KQ_UNLOCK(kq);
			error = ENOENT;
			goto done;
		}
	}

	if (kev->flags & EV_DELETE) {
		kn->kn_status |= KN_INFLUX;
		KQ_UNLOCK(kq);
		if (!(kn->kn_status & KN_DETACHED))
			kn->kn_fop->f_detach(kn);
		knote_drop(kn, td);
		goto done;
	}

	/*
	 * The user may change some filter values after the initial EV_ADD,
	 * but doing so will not reset any filter which has already been
	 * triggered.
	 */
	kn->kn_status |= KN_INFLUX | KN_SCAN;
	KQ_UNLOCK(kq);
	KN_LIST_LOCK(kn);
	kn->kn_kevent.udata = kev->udata;
	if (!fops->f_isfd && fops->f_touch != NULL) {
		fops->f_touch(kn, kev, EVENT_REGISTER);
	} else {
		kn->kn_sfflags = kev->fflags;
		kn->kn_sdata = kev->data;
	}

	/*
	 * We can get here with kn->kn_knlist == NULL.  This can happen when
	 * the initial attach event decides that the event is "completed"
	 * already, e.g. when filt_procattach is called on a zombie process.
	 * It will call filt_proc, which will remove the knote from the list
	 * and NULL kn_knlist.
	 */
done_ev_add:
	event = kn->kn_fop->f_event(kn, 0);
	KQ_LOCK(kq);
	if (event)
		KNOTE_ACTIVATE(kn, 1);
	kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
	KN_LIST_UNLOCK(kn);

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		kn->kn_status |= KN_DISABLED;
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
	}
	KQ_UNLOCK_FLUX(kq);

done:
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(td->td_proc->p_fd);
	if (fp != NULL)
		fdrop(fp, td);
	if (tkn != NULL)
		knote_free(tkn);
	if (fops != NULL)
		kqueue_fo_release(filt);
	return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
	int error;
	struct kqueue *kq;

	error = 0;

	kq = fp->f_data;
	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
		return (EBADF);
	*kqp = kq;
	KQ_LOCK(kq);
	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
		KQ_UNLOCK(kq);
		return (EBADF);
	}
	kq->kq_refcnt++;
	KQ_UNLOCK(kq);

	return error;
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
	if (locked)
		KQ_OWNED(kq);
	else
		KQ_LOCK(kq);
	kq->kq_refcnt--;
	if (kq->kq_refcnt == 1)
		wakeup(&kq->kq_refcnt);
	if (!locked)
		KQ_UNLOCK(kq);
}

static void
kqueue_schedtask(struct kqueue *kq)
{

	KQ_OWNED(kq);
	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
	    ("scheduling kqueue task while draining"));

	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
		kq->kq_state |= KQ_TASKSCHED;
	}
}

/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually are
 * no locks held (and there should not be).
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
	int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				to_free = list;
				list = NULL;
			} else {
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return 0;
}

static void
kqueue_task(void *arg, int pending)
{
	struct kqueue *kq;
	int haskqglobal;

	haskqglobal = 0;
	kq = arg;

	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
	KQ_LOCK(kq);

	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

	kq->kq_state &= ~KQ_TASKSCHED;
	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
		wakeup(&kq->kq_state);
	}
	KQ_UNLOCK(kq);
	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}

/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are INFLUX.
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= INT64_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	marker = knote_alloc(1);
	if (marker == NULL) {
		error = ENOMEM;
		goto done_nl;
	}
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

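	/*
	 * The marker bounds this scan pass: the loop consumes knotes from
	 * the head of the queue and stops once it dequeues the marker, so
	 * knotes re-enqueued during the scan cannot make it spin forever.
	 */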
	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT((kn->kn_status & KN_INFLUX) == 0,
		    ("KN_INFLUX set when not supposed to be"));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kn->kn_status |= KN_INFLUX;
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've marked
			 * it _INFLUX.
			 */
			*kevp = kn->kn_kevent;
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			kn->kn_status |= KN_INFLUX | KN_SCAN;
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			KN_LIST_LOCK(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &=
				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX |
				    KN_SCAN);
				kq->kq_count--;
				KN_LIST_UNLOCK(kn);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes that weren't
				 * touched.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
			KN_LIST_UNLOCK(kn);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	 int flags, struct thread *td)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
	struct thread *td)
{

	return (EINVAL);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long cmd, void *data,
	struct ucred *active_cred, struct thread *td)
{
	/*
	 * Enabling sigio causes two major problems:
	 * 1) infinite recursion:
	 * Synopsis: kevent is being used to track signals and has FIOASYNC
	 * set.  On receipt of a signal this will cause a kqueue to recurse
	 * into itself over and over.  Sending the sigio causes the kqueue
	 * to become ready, which in turn posts sigio again, forever.
	 * Solution: this can be solved by setting a flag in the kqueue that
	 * we have a SIGIO in progress.
	 * 2) locking problems:
	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
	 * us above the proc and pgrp locks.
	 * Solution: Post a signal using an async mechanism, being sure to
	 * record a generation count in the delivery so that we do not deliver
	 * a signal to the wrong process.
	 *
	 * Note, these two mechanisms are somewhat mutually exclusive!
	 */
#if 0
	struct kqueue *kq;

	kq = fp->f_data;
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			kq->kq_state |= KQ_ASYNC;
		} else {
			kq->kq_state &= ~KQ_ASYNC;
		}
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &kq->kq_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&kq->kq_sigio);
		return (0);
	}
#endif

	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct kqueue *kq;
	int revents = 0;
	int error;

	if ((error = kqueue_acquire(fp, &kq)))
		return POLLERR;

	KQ_LOCK(kq);
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(td, &kq->kq_sel);
			if (SEL_WAITING(&kq->kq_sel))
				kq->kq_state |= KQ_SEL;
		}
	}
	kqueue_release(kq, 1);
	KQ_UNLOCK(kq);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
	struct thread *td)
{

	bzero((void *)st, sizeof *st);
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
	 */
	st->st_mode = S_IFIFO;
	return (0);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp;
	struct knote *kn;
	int i;
	int error;
	int filedesc_unlock;

	if ((error = kqueue_acquire(fp, &kq)))
		return error;

	filedesc_unlock = 0;
	KQ_LOCK(kq);

	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
	    ("kqueue already closing"));
	kq->kq_state |= KQ_CLOSING;
	if (kq->kq_refcnt > 1)
		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);

	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
	fdp = kq->kq_fdp;

	KASSERT(knlist_empty(&kq->kq_sel.si_note),
	    ("kqueue's knlist not empty"));

	for (i = 0; i < kq->kq_knlistsize; i++) {
		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
			if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
				continue;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			KQ_LOCK(kq);
		}
	}
	if (kq->kq_knhashmask != 0) {
		for (i = 0; i <= kq->kq_knhashmask; i++) {
			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
				if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
					kq->kq_state |= KQ_FLUXWAIT;
					msleep(kq, &kq->kq_lock, PSOCK,
					       "kqclo2", 0);
					continue;
				}
				kn->kn_status |= KN_INFLUX;
				KQ_UNLOCK(kq);
				if (!(kn->kn_status & KN_DETACHED))
					kn->kn_fop->f_detach(kn);
				knote_drop(kn, td);
				KQ_LOCK(kq);
			}
		}
	}

	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
		kq->kq_state |= KQ_TASKDRAIN;
		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
	}

	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}

	KQ_UNLOCK(kq);

	/*
	 * We could be called due to the knote_drop() doing fdrop(),
	 * called from kqueue_register().  In this case the global
	 * lock is owned, and the filedesc sx is locked before it, so
	 * that we do not take a sleepable lock after a non-sleepable one.
	 */
	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
		FILEDESC_XLOCK(fdp);
		filedesc_unlock = 1;
	} else
		filedesc_unlock = 0;
	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(fdp);

	seldrain(&kq->kq_sel);
	knlist_destroy(&kq->kq_sel.si_note);
	mtx_destroy(&kq->kq_lock);
	kq->kq_fdp = NULL;

	if (kq->kq_knhash != NULL)
		free(kq->kq_knhash, M_KQUEUE);
	if (kq->kq_knlist != NULL)
		free(kq->kq_knlist, M_KQUEUE);

	funsetown(&kq->kq_sigio);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{
	KQ_OWNED(kq);

	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
		selwakeuppri(&kq->kq_sel, PSOCK);
		if (!SEL_WAITING(&kq->kq_sel))
			kq->kq_state &= ~KQ_SEL;
	}
	if (!knlist_empty(&kq->kq_sel.si_note))
		kqueue_schedtask(kq);
	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
		pgsigio(&kq->kq_sigio, SIGIO, 0);
	}
}

/*
 * Walk down a list of knotes, activating them if their event has triggered.
 *
 * There is a possibility to optimize in the case of one kq watching another.
 * Instead of scheduling a task to wake it up, you could pass enough state
 * down the chain to wake up the parent kqueue.  Make this code functional
 * first.
 */
void
knote(struct knlist *list, long hint, int lockflags)
{
	struct kqueue *kq;
	struct knote *kn;
	int error;

	if (list == NULL)
		return;

	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);

	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_lock(list->kl_lockarg);

	/*
	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
	 * the kqueue scheduling, but this will introduce four
	 * lock/unlock operations for each knote to test.  If we do, continue
	 * to use SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe in our case,
	 * as it is only safe if you want to remove the current item, which
	 * we are not doing.
	 */
	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
			/*
			 * Do not process the influx notes, except for
			 * the influx coming from the kq unlock in the
			 * kqueue_scan().  In the latter case, we do
			 * not interfere with the scan, since the code
			 * fragment in kqueue_scan() locks the knlist,
			 * and cannot proceed until we finish.
			 */
			KQ_UNLOCK(kq);
		} else if ((lockflags & KNF_NOKQLOCK) != 0) {
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			error = kn->kn_fop->f_event(kn, hint);
			KQ_LOCK(kq);
			kn->kn_status &= ~KN_INFLUX;
			if (error)
				KNOTE_ACTIVATE(kn, 1);
			KQ_UNLOCK_FLUX(kq);
		} else {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, hint))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
		}
	}
	if ((lockflags & KNF_LISTLOCKED) == 0)
		list->kl_unlock(list->kl_lockarg);
}

/*
 * add a knote to a knlist
 */
void
knlist_add(struct knlist *knl, struct knote *kn, int islocked)
{
	KNL_ASSERT_LOCK(knl, islocked);
	KQ_NOTOWNED(kn->kn_kq);
	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
	if (!islocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
	if (!islocked)
		knl->kl_unlock(knl->kl_lockarg);
	KQ_LOCK(kn->kn_kq);
	kn->kn_knlist = knl;
	kn->kn_status &= ~KN_DETACHED;
	KQ_UNLOCK(kn->kn_kq);
}

static void
knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
{
	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
	KNL_ASSERT_LOCK(knl, knlislocked);
	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
	if (!kqislocked)
		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
    ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
	if (!knlislocked)
		knl->kl_lock(knl->kl_lockarg);
	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
	kn->kn_knlist = NULL;
	if (!knlislocked)
		knl->kl_unlock(knl->kl_lockarg);
	if (!kqislocked)
		KQ_LOCK(kn->kn_kq);
	kn->kn_status |= KN_DETACHED;
	if (!kqislocked)
		KQ_UNLOCK(kn->kn_kq);
}

/*
 * remove knote from the specified knlist
 */
void
knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
{

	knlist_remove_kq(knl, kn, islocked, 0);
}

/*
 * remove knote from the specified knlist while in f_event handler.
 */
void
knlist_remove_inevent(struct knlist *knl, struct knote *kn)
{

	knlist_remove_kq(knl, kn, 1,
	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
}

int
knlist_empty(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	return SLIST_EMPTY(&knl->kl_list);
}

static struct mtx	knlist_lock;
MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
	MTX_DEF);
static void knlist_mtx_lock(void *arg);
static void knlist_mtx_unlock(void *arg);

static void
knlist_mtx_lock(void *arg)
{

	mtx_lock((struct mtx *)arg);
}

static void
knlist_mtx_unlock(void *arg)
{

	mtx_unlock((struct mtx *)arg);
}

static void
knlist_mtx_assert_locked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_OWNED);
}

static void
knlist_mtx_assert_unlocked(void *arg)
{

	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
}

static void
knlist_rw_rlock(void *arg)
{

	rw_rlock((struct rwlock *)arg);
}

static void
knlist_rw_runlock(void *arg)
{

	rw_runlock((struct rwlock *)arg);
}

static void
knlist_rw_assert_locked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_LOCKED);
}

static void
knlist_rw_assert_unlocked(void *arg)
{

	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
}

void
knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
    void (*kl_unlock)(void *),
    void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
{

	if (lock == NULL)
		knl->kl_lockarg = &knlist_lock;
	else
		knl->kl_lockarg = lock;

	if (kl_lock == NULL)
		knl->kl_lock = knlist_mtx_lock;
	else
		knl->kl_lock = kl_lock;
	if (kl_unlock == NULL)
		knl->kl_unlock = knlist_mtx_unlock;
	else
		knl->kl_unlock = kl_unlock;
	if (kl_assert_locked == NULL)
		knl->kl_assert_locked = knlist_mtx_assert_locked;
	else
		knl->kl_assert_locked = kl_assert_locked;
	if (kl_assert_unlocked == NULL)
		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
	else
		knl->kl_assert_unlocked = kl_assert_unlocked;

	SLIST_INIT(&knl->kl_list);
}

void
knlist_init_mtx(struct knlist *knl, struct mtx *lock)
{

	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
}

void
knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
{

	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
}

void
knlist_destroy(struct knlist *knl)
{

#ifdef INVARIANTS
	/*
	 * if we run across this error, we need to find the offending
	 * driver and have it call knlist_clear or knlist_delete.
	 */
	if (!SLIST_EMPTY(&knl->kl_list))
		printf("WARNING: destroying knlist w/ knotes on it!\n");
#endif

	knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
	SLIST_INIT(&knl->kl_list);
}

/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if ((kn->kn_status & KN_INFLUX)) {
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn->kn_status |= KN_INFLUX | KN_DETACHED;
			KQ_UNLOCK(kq);
			knote_drop(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still KN_INFLUX remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn->kn_status & KN_INFLUX,
		    ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}

/*
 * Remove all knotes referencing a specified fd.  This must be called with
 * the FILEDESC lock held, which prevents a race where a new fd comes along
 * and occupies the entry while we attach a knote to the fd.
 */
void
knote_fdclose(struct thread *td, int fd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct kqueue *kq;
	struct knote *kn;
	int influx;

	FILEDESC_XLOCK_ASSERT(fdp);

	/*
	 * We shouldn't have to worry about new kevents appearing on fd
	 * since filedesc is locked.
	 */
	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
		KQ_LOCK(kq);

again:
		influx = 0;
		while (kq->kq_knlistsize > fd &&
		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
			if (kn->kn_status & KN_INFLUX) {
				/* someone else might be waiting on our knote */
				if (influx)
					wakeup(kq);
				kq->kq_state |= KQ_FLUXWAIT;
				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
				goto again;
			}
			kn->kn_status |= KN_INFLUX;
			KQ_UNLOCK(kq);
			if (!(kn->kn_status & KN_DETACHED))
				kn->kn_fop->f_detach(kn);
			knote_drop(kn, td);
			influx = 1;
			KQ_LOCK(kq);
		}
		KQ_UNLOCK_FLUX(kq);
	}
}

static int
knote_attach(struct knote *kn, struct kqueue *kq)
{
	struct klist *list;

	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
	KQ_OWNED(kq);

	if (kn->kn_fop->f_isfd) {
		if (kn->kn_id >= kq->kq_knlistsize)
			return ENOMEM;
		list = &kq->kq_knlist[kn->kn_id];
	} else {
		if (kq->kq_knhash == NULL)
			return ENOMEM;
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
	}

	SLIST_INSERT_HEAD(list, kn, kn_link);

	return 0;
}

/*
 * knote must already have been detached using the f_detach method.
 * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 * to prevent other removal.
 */
static void
knote_drop(struct knote *kn, struct thread *td)
{
	struct kqueue *kq;
	struct klist *list;

	kq = kn->kn_kq;

	KQ_NOTOWNED(kq);
	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
	    ("knote_drop called without KN_INFLUX set in kn_status"));

	KQ_LOCK(kq);
	if (kn->kn_fop->f_isfd)
		list = &kq->kq_knlist[kn->kn_id];
	else
		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];

	if (!SLIST_EMPTY(list))
		SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	KQ_UNLOCK_FLUX(kq);

	if (kn->kn_fop->f_isfd) {
		fdrop(kn->kn_fp, td);
		kn->kn_fp = NULL;
	}
	kqueue_fo_release(kn->kn_kevent.filter);
	kn->kn_fop = NULL;
	knote_free(kn);
}

static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;

	KQ_OWNED(kn->kn_kq);
	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
}

static void
knote_init(void)
{

	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);

static struct knote *
knote_alloc(int waitok)
{
	return ((struct knote *)uma_zalloc(knote_zone,
	    (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
}

static void
knote_free(struct knote *kn)
{
	if (kn != NULL)
		uma_zfree(knote_zone, kn);
}

/*
 * Register the kev w/ the kq specified by fd.
 */
int
kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
{
	struct kqueue *kq;
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
	if (error != 0)
		return (error);
	if ((error = kqueue_acquire(fp, &kq)) != 0)
		goto noacquire;

	error = kqueue_register(kq, kev, td, waitok);

	kqueue_release(kq, 0);

noacquire:
	fdrop(fp, td);

	return error;
}
