kern_event.c revision 258324
1/*-
2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
4 * Copyright (c) 2009 Apple, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/10/sys/kern/kern_event.c 258324 2013-11-18 22:37:01Z pjd $");
31
32#include "opt_ktrace.h"
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/capability.h>
37#include <sys/kernel.h>
38#include <sys/lock.h>
39#include <sys/mutex.h>
40#include <sys/rwlock.h>
41#include <sys/proc.h>
42#include <sys/malloc.h>
43#include <sys/unistd.h>
44#include <sys/file.h>
45#include <sys/filedesc.h>
46#include <sys/filio.h>
47#include <sys/fcntl.h>
48#include <sys/kthread.h>
49#include <sys/selinfo.h>
50#include <sys/stdatomic.h>
51#include <sys/queue.h>
52#include <sys/event.h>
53#include <sys/eventvar.h>
54#include <sys/poll.h>
55#include <sys/protosw.h>
56#include <sys/sigio.h>
57#include <sys/signalvar.h>
58#include <sys/socket.h>
59#include <sys/socketvar.h>
60#include <sys/stat.h>
61#include <sys/sysctl.h>
62#include <sys/sysproto.h>
63#include <sys/syscallsubr.h>
64#include <sys/taskqueue.h>
65#include <sys/uio.h>
66#ifdef KTRACE
67#include <sys/ktrace.h>
68#endif
69
70#include <vm/uma.h>
71
72static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
73
74/*
75 * This lock is used if multiple kq locks are required.  This possibly
76 * should be made into a per proc lock.
77 */
78static struct mtx	kq_global;
79MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
80#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
81	if (!haslck)				\
82		mtx_lock(lck);			\
83	haslck = 1;				\
84} while (0)
85#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
86	if (haslck)				\
87		mtx_unlock(lck);			\
88	haslck = 0;				\
89} while (0)
90
91TASKQUEUE_DEFINE_THREAD(kqueue);
92
93static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
94static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
95static int	kqueue_register(struct kqueue *kq, struct kevent *kev,
96		    struct thread *td, int waitok);
97static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
98static void	kqueue_release(struct kqueue *kq, int locked);
99static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
100		    uintptr_t ident, int waitok);
101static void	kqueue_task(void *arg, int pending);
102static int	kqueue_scan(struct kqueue *kq, int maxevents,
103		    struct kevent_copyops *k_ops,
104		    const struct timespec *timeout,
105		    struct kevent *keva, struct thread *td);
106static void 	kqueue_wakeup(struct kqueue *kq);
107static struct filterops *kqueue_fo_find(int filt);
108static void	kqueue_fo_release(int filt);
109
110static fo_rdwr_t	kqueue_read;
111static fo_rdwr_t	kqueue_write;
112static fo_truncate_t	kqueue_truncate;
113static fo_ioctl_t	kqueue_ioctl;
114static fo_poll_t	kqueue_poll;
115static fo_kqfilter_t	kqueue_kqfilter;
116static fo_stat_t	kqueue_stat;
117static fo_close_t	kqueue_close;
118
119static struct fileops kqueueops = {
120	.fo_read = kqueue_read,
121	.fo_write = kqueue_write,
122	.fo_truncate = kqueue_truncate,
123	.fo_ioctl = kqueue_ioctl,
124	.fo_poll = kqueue_poll,
125	.fo_kqfilter = kqueue_kqfilter,
126	.fo_stat = kqueue_stat,
127	.fo_close = kqueue_close,
128	.fo_chmod = invfo_chmod,
129	.fo_chown = invfo_chown,
130	.fo_sendfile = invfo_sendfile,
131};
132
133static int 	knote_attach(struct knote *kn, struct kqueue *kq);
134static void 	knote_drop(struct knote *kn, struct thread *td);
135static void 	knote_enqueue(struct knote *kn);
136static void 	knote_dequeue(struct knote *kn);
137static void 	knote_init(void);
138static struct 	knote *knote_alloc(int waitok);
139static void 	knote_free(struct knote *kn);
140
141static void	filt_kqdetach(struct knote *kn);
142static int	filt_kqueue(struct knote *kn, long hint);
143static int	filt_procattach(struct knote *kn);
144static void	filt_procdetach(struct knote *kn);
145static int	filt_proc(struct knote *kn, long hint);
146static int	filt_fileattach(struct knote *kn);
147static void	filt_timerexpire(void *knx);
148static int	filt_timerattach(struct knote *kn);
149static void	filt_timerdetach(struct knote *kn);
150static int	filt_timer(struct knote *kn, long hint);
151static int	filt_userattach(struct knote *kn);
152static void	filt_userdetach(struct knote *kn);
153static int	filt_user(struct knote *kn, long hint);
154static void	filt_usertouch(struct knote *kn, struct kevent *kev,
155		    u_long type);
156
157static struct filterops file_filtops = {
158	.f_isfd = 1,
159	.f_attach = filt_fileattach,
160};
161static struct filterops kqread_filtops = {
162	.f_isfd = 1,
163	.f_detach = filt_kqdetach,
164	.f_event = filt_kqueue,
165};
166/* XXX - move to kern_proc.c?  */
167static struct filterops proc_filtops = {
168	.f_isfd = 0,
169	.f_attach = filt_procattach,
170	.f_detach = filt_procdetach,
171	.f_event = filt_proc,
172};
173static struct filterops timer_filtops = {
174	.f_isfd = 0,
175	.f_attach = filt_timerattach,
176	.f_detach = filt_timerdetach,
177	.f_event = filt_timer,
178};
179static struct filterops user_filtops = {
180	.f_attach = filt_userattach,
181	.f_detach = filt_userdetach,
182	.f_event = filt_user,
183	.f_touch = filt_usertouch,
184};
185
186static uma_zone_t	knote_zone;
187static atomic_uint	kq_ncallouts = ATOMIC_VAR_INIT(0);
188static unsigned int 	kq_calloutmax = 4 * 1024;
189SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
190    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
191
192/* XXX - ensure not KN_INFLUX?? */
193#define KNOTE_ACTIVATE(kn, islock) do { 				\
194	if ((islock))							\
195		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
196	else								\
197		KQ_LOCK((kn)->kn_kq);					\
198	(kn)->kn_status |= KN_ACTIVE;					\
199	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
200		knote_enqueue((kn));					\
201	if (!(islock))							\
202		KQ_UNLOCK((kn)->kn_kq);					\
203} while(0)
204#define KQ_LOCK(kq) do {						\
205	mtx_lock(&(kq)->kq_lock);					\
206} while (0)
207#define KQ_FLUX_WAKEUP(kq) do {						\
208	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
209		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
210		wakeup((kq));						\
211	}								\
212} while (0)
213#define KQ_UNLOCK_FLUX(kq) do {						\
214	KQ_FLUX_WAKEUP(kq);						\
215	mtx_unlock(&(kq)->kq_lock);					\
216} while (0)
217#define KQ_UNLOCK(kq) do {						\
218	mtx_unlock(&(kq)->kq_lock);					\
219} while (0)
220#define KQ_OWNED(kq) do {						\
221	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
222} while (0)
223#define KQ_NOTOWNED(kq) do {						\
224	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
225} while (0)
226#define KN_LIST_LOCK(kn) do {						\
227	if (kn->kn_knlist != NULL)					\
228		kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);	\
229} while (0)
230#define KN_LIST_UNLOCK(kn) do {						\
231	if (kn->kn_knlist != NULL) 					\
232		kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);	\
233} while (0)
234#define	KNL_ASSERT_LOCK(knl, islocked) do {				\
235	if (islocked)							\
236		KNL_ASSERT_LOCKED(knl);				\
237	else								\
238		KNL_ASSERT_UNLOCKED(knl);				\
239} while (0)
240#ifdef INVARIANTS
241#define	KNL_ASSERT_LOCKED(knl) do {					\
242	knl->kl_assert_locked((knl)->kl_lockarg);			\
243} while (0)
244#define	KNL_ASSERT_UNLOCKED(knl) do {					\
245	knl->kl_assert_unlocked((knl)->kl_lockarg);			\
246} while (0)
247#else /* !INVARIANTS */
248#define	KNL_ASSERT_LOCKED(knl) do {} while(0)
249#define	KNL_ASSERT_UNLOCKED(knl) do {} while (0)
250#endif /* INVARIANTS */
251
252#define	KN_HASHSIZE		64		/* XXX should be tunable */
253#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
254
255static int
256filt_nullattach(struct knote *kn)
257{
258
259	return (ENXIO);
260};
261
262struct filterops null_filtops = {
263	.f_isfd = 0,
264	.f_attach = filt_nullattach,
265};
266
267/* XXX - make SYSINIT to add these, and move into respective modules. */
268extern struct filterops sig_filtops;
269extern struct filterops fs_filtops;
270
271/*
272 * Table for all system-defined filters.
273 */
274static struct mtx	filterops_lock;
275MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
276	MTX_DEF);
277static struct {
278	struct filterops *for_fop;
279	int for_refcnt;
280} sysfilt_ops[EVFILT_SYSCOUNT] = {
281	{ &file_filtops },			/* EVFILT_READ */
282	{ &file_filtops },			/* EVFILT_WRITE */
283	{ &null_filtops },			/* EVFILT_AIO */
284	{ &file_filtops },			/* EVFILT_VNODE */
285	{ &proc_filtops },			/* EVFILT_PROC */
286	{ &sig_filtops },			/* EVFILT_SIGNAL */
287	{ &timer_filtops },			/* EVFILT_TIMER */
288	{ &null_filtops },			/* former EVFILT_NETDEV */
289	{ &fs_filtops },			/* EVFILT_FS */
290	{ &null_filtops },			/* EVFILT_LIO */
291	{ &user_filtops },			/* EVFILT_USER */
292};
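
/*
 * The system filter values are negative (EVFILT_READ is -1, EVFILT_WRITE is
 * -2, and so on), so sysfilt_ops is indexed with ~filt: ~EVFILT_READ == 0,
 * ~EVFILT_WRITE == 1, and so forth.  This is also why the range checks in
 * kqueue_add_filteropts() and friends reject "filt > 0 ||
 * filt + EVFILT_SYSCOUNT < 0".
 */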
293
294/*
295 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
296 * method.
297 */
298static int
299filt_fileattach(struct knote *kn)
300{
301
302	return (fo_kqfilter(kn->kn_fp, kn));
303}
304
305/*ARGSUSED*/
306static int
307kqueue_kqfilter(struct file *fp, struct knote *kn)
308{
309	struct kqueue *kq = kn->kn_fp->f_data;
310
311	if (kn->kn_filter != EVFILT_READ)
312		return (EINVAL);
313
314	kn->kn_status |= KN_KQUEUE;
315	kn->kn_fop = &kqread_filtops;
316	knlist_add(&kq->kq_sel.si_note, kn, 0);
317
318	return (0);
319}
320
321static void
322filt_kqdetach(struct knote *kn)
323{
324	struct kqueue *kq = kn->kn_fp->f_data;
325
326	knlist_remove(&kq->kq_sel.si_note, kn, 0);
327}
328
329/*ARGSUSED*/
330static int
331filt_kqueue(struct knote *kn, long hint)
332{
333	struct kqueue *kq = kn->kn_fp->f_data;
334
335	kn->kn_data = kq->kq_count;
336	return (kn->kn_data > 0);
337}
338
339/* XXX - move to kern_proc.c?  */
340static int
341filt_procattach(struct knote *kn)
342{
343	struct proc *p;
344	int immediate;
345	int error;
346
347	immediate = 0;
348	p = pfind(kn->kn_id);
349	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
350		p = zpfind(kn->kn_id);
351		immediate = 1;
352	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
353		immediate = 1;
354	}
355
356	if (p == NULL)
357		return (ESRCH);
358	if ((error = p_cansee(curthread, p))) {
359		PROC_UNLOCK(p);
360		return (error);
361	}
362
363	kn->kn_ptr.p_proc = p;
364	kn->kn_flags |= EV_CLEAR;		/* automatically set */
365
366	/*
367	 * internal flag indicating registration done by kernel
368	 */
369	if (kn->kn_flags & EV_FLAG1) {
370		kn->kn_data = kn->kn_sdata;		/* ppid */
371		kn->kn_fflags = NOTE_CHILD;
372		kn->kn_flags &= ~EV_FLAG1;
373	}
374
375	if (immediate == 0)
376		knlist_add(&p->p_klist, kn, 1);
377
378	/*
379	 * Immediately activate any exit notes if the target process is a
380	 * zombie.  This is necessary to handle the case where the target
381	 * process, e.g. a child, dies before the kevent is registered.
382	 */
383	if (immediate && filt_proc(kn, NOTE_EXIT))
384		KNOTE_ACTIVATE(kn, 0);
385
386	PROC_UNLOCK(p);
387
388	return (0);
389}
390
391/*
392 * The knote may be attached to a different process, which may exit,
393 * leaving nothing for the knote to be attached to.  So when the process
394 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
395 * it will be deleted when read out.  However, as part of the knote deletion,
396 * this routine is called, so a check is needed to avoid actually performing
397 * a detach, because the original process does not exist any more.
398 */
399/* XXX - move to kern_proc.c?  */
400static void
401filt_procdetach(struct knote *kn)
402{
403	struct proc *p;
404
405	p = kn->kn_ptr.p_proc;
406	knlist_remove(&p->p_klist, kn, 0);
407	kn->kn_ptr.p_proc = NULL;
408}
409
410/* XXX - move to kern_proc.c?  */
411static int
412filt_proc(struct knote *kn, long hint)
413{
414	struct proc *p = kn->kn_ptr.p_proc;
415	u_int event;
416
417	/*
418	 * mask off extra data
419	 */
420	event = (u_int)hint & NOTE_PCTRLMASK;
421
422	/*
423	 * if the user is interested in this event, record it.
424	 */
425	if (kn->kn_sfflags & event)
426		kn->kn_fflags |= event;
427
428	/*
429	 * process is gone, so flag the event as finished.
430	 */
431	if (event == NOTE_EXIT) {
432		if (!(kn->kn_status & KN_DETACHED))
433			knlist_remove_inevent(&p->p_klist, kn);
434		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
435		kn->kn_ptr.p_proc = NULL;
436		if (kn->kn_fflags & NOTE_EXIT)
437			kn->kn_data = p->p_xstat;
438		if (kn->kn_fflags == 0)
439			kn->kn_flags |= EV_DROP;
440		return (1);
441	}
442
443	return (kn->kn_fflags != 0);
444}
445
446/*
447 * Called when a process forks.  It mostly does the same as knote(),
448 * activating all knotes registered to be activated when the process
449 * forks.  Additionally, for each knote attached to the parent, check
450 * whether the user wants to track the new process.  If so, attach a
451 * new knote to the child and immediately report an event with the
452 * child's pid.
453 */
454void
455knote_fork(struct knlist *list, int pid)
456{
457	struct kqueue *kq;
458	struct knote *kn;
459	struct kevent kev;
460	int error;
461
462	if (list == NULL)
463		return;
464	list->kl_lock(list->kl_lockarg);
465
466	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
467		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
468			continue;
469		kq = kn->kn_kq;
470		KQ_LOCK(kq);
471		if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
472			KQ_UNLOCK(kq);
473			continue;
474		}
475
476		/*
477		 * The same as knote(), activate the event.
478		 */
479		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
480			kn->kn_status |= KN_HASKQLOCK;
481			if (kn->kn_fop->f_event(kn, NOTE_FORK))
482				KNOTE_ACTIVATE(kn, 1);
483			kn->kn_status &= ~KN_HASKQLOCK;
484			KQ_UNLOCK(kq);
485			continue;
486		}
487
488		/*
489		 * The NOTE_TRACK case.  In addition to activating
490		 * the event, we need to register a new event to
491		 * track the child.  Drop the locks in preparation for
492		 * the call to kqueue_register().
493		 */
494		kn->kn_status |= KN_INFLUX;
495		KQ_UNLOCK(kq);
496		list->kl_unlock(list->kl_lockarg);
497
498		/*
499		 * Activate existing knote and register a knote with
500		 * new process.
501		 */
502		kev.ident = pid;
503		kev.filter = kn->kn_filter;
504		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
505		kev.fflags = kn->kn_sfflags;
506		kev.data = kn->kn_id;		/* parent */
507		kev.udata = kn->kn_kevent.udata;/* preserve udata */
508		error = kqueue_register(kq, &kev, NULL, 0);
509		if (error)
510			kn->kn_fflags |= NOTE_TRACKERR;
511		if (kn->kn_fop->f_event(kn, NOTE_FORK))
512			KNOTE_ACTIVATE(kn, 0);
513		KQ_LOCK(kq);
514		kn->kn_status &= ~KN_INFLUX;
515		KQ_UNLOCK_FLUX(kq);
516		list->kl_lock(list->kl_lockarg);
517	}
518	list->kl_unlock(list->kl_lockarg);
519}
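
/*
 * Example (sketch): how a userland consumer would request the tracking
 * behaviour implemented above.  "kq", "kev" and "pid" are assumed to have
 * been set up by the caller.
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_TRACK, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * When the tracked process forks, the child is reported with NOTE_CHILD set
 * in fflags and the parent's pid in data; NOTE_TRACKERR is set if the new
 * knote could not be registered.
 */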
520
521/*
522 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
523 * interval timer support code.
524 */
525static __inline sbintime_t
526timer2sbintime(intptr_t data)
527{
528
529	return (SBT_1MS * data);
530}
531
532static void
533filt_timerexpire(void *knx)
534{
535	struct callout *calloutp;
536	struct knote *kn;
537
538	kn = knx;
539	kn->kn_data++;
540	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
541
542	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
543		calloutp = (struct callout *)kn->kn_hook;
544		callout_reset_sbt_on(calloutp,
545		    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
546		    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
547	}
548}
549
550/*
551 * data contains the amount of time to sleep, in milliseconds
552 */
553static int
554filt_timerattach(struct knote *kn)
555{
556	struct callout *calloutp;
557	sbintime_t to;
558	unsigned int ncallouts;
559
560	if ((intptr_t)kn->kn_sdata < 0)
561		return (EINVAL);
562	if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
563		kn->kn_sdata = 1;
564	to = timer2sbintime(kn->kn_sdata);
565	if (to < 0)
566		return (EINVAL);
567
568	ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
569	do {
570		if (ncallouts >= kq_calloutmax)
571			return (ENOMEM);
572	} while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
573	    &ncallouts, ncallouts + 1, memory_order_relaxed,
574	    memory_order_relaxed));
575
576	kn->kn_flags |= EV_CLEAR;		/* automatically set */
577	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
578	calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
579	callout_init(calloutp, CALLOUT_MPSAFE);
580	kn->kn_hook = calloutp;
581	callout_reset_sbt_on(calloutp, to, 0 /* 1ms? */,
582	    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
583
584	return (0);
585}
586
587static void
588filt_timerdetach(struct knote *kn)
589{
590	struct callout *calloutp;
591	unsigned int old;
592
593	calloutp = (struct callout *)kn->kn_hook;
594	callout_drain(calloutp);
595	free(calloutp, M_KQUEUE);
596	old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
597	KASSERT(old > 0, ("Number of callouts cannot become negative"));
598	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
599}
600
601static int
602filt_timer(struct knote *kn, long hint)
603{
604
605	return (kn->kn_data != 0);
606}
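
/*
 * Example (sketch): registering a periodic 500 millisecond timer from
 * userland.  "kq" and "kev" are assumed to have been set up by the caller;
 * the ident (1 here) is an arbitrary caller-chosen value.
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * filt_timerattach() sets EV_CLEAR itself, so the timer keeps re-arming in
 * filt_timerexpire() until it is deleted, unless EV_ONESHOT was requested.
 */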
607
608static int
609filt_userattach(struct knote *kn)
610{
611
612	/*
613	 * EVFILT_USER knotes are not attached to anything in the kernel.
614	 */
615	kn->kn_hook = NULL;
616	if (kn->kn_fflags & NOTE_TRIGGER)
617		kn->kn_hookid = 1;
618	else
619		kn->kn_hookid = 0;
620	return (0);
621}
622
623static void
624filt_userdetach(__unused struct knote *kn)
625{
626
627	/*
628	 * EVFILT_USER knotes are not attached to anything in the kernel.
629	 */
630}
631
632static int
633filt_user(struct knote *kn, __unused long hint)
634{
635
636	return (kn->kn_hookid);
637}
638
639static void
640filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
641{
642	u_int ffctrl;
643
644	switch (type) {
645	case EVENT_REGISTER:
646		if (kev->fflags & NOTE_TRIGGER)
647			kn->kn_hookid = 1;
648
649		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
650		kev->fflags &= NOTE_FFLAGSMASK;
651		switch (ffctrl) {
652		case NOTE_FFNOP:
653			break;
654
655		case NOTE_FFAND:
656			kn->kn_sfflags &= kev->fflags;
657			break;
658
659		case NOTE_FFOR:
660			kn->kn_sfflags |= kev->fflags;
661			break;
662
663		case NOTE_FFCOPY:
664			kn->kn_sfflags = kev->fflags;
665			break;
666
667		default:
668			/* XXX Return error? */
669			break;
670		}
671		kn->kn_sdata = kev->data;
672		if (kev->flags & EV_CLEAR) {
673			kn->kn_hookid = 0;
674			kn->kn_data = 0;
675			kn->kn_fflags = 0;
676		}
677		break;
678
679	case EVENT_PROCESS:
680		*kev = kn->kn_kevent;
681		kev->fflags = kn->kn_sfflags;
682		kev->data = kn->kn_sdata;
683		if (kn->kn_flags & EV_CLEAR) {
684			kn->kn_hookid = 0;
685			kn->kn_data = 0;
686			kn->kn_fflags = 0;
687		}
688		break;
689
690	default:
691		panic("filt_usertouch() - invalid type (%ld)", type);
692		break;
693	}
694}
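
/*
 * Example (sketch): typical use of EVFILT_USER from userland.  The first
 * call registers the event and a later call triggers it; "kq" and "kev" are
 * assumed to have been set up by the caller and the ident (1 here) is
 * arbitrary.
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The second registration reaches filt_usertouch() with EVENT_REGISTER and
 * sets kn_hookid, so filt_user() reports the knote as active.
 */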
695
696int
697sys_kqueue(struct thread *td, struct kqueue_args *uap)
698{
699	struct filedesc *fdp;
700	struct kqueue *kq;
701	struct file *fp;
702	int fd, error;
703
704	fdp = td->td_proc->p_fd;
705	error = falloc(td, &fp, &fd, 0);
706	if (error)
707		goto done2;
708
709	/* An extra reference on `fp' has been held for us by falloc(). */
710	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
711	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
712	TAILQ_INIT(&kq->kq_head);
713	kq->kq_fdp = fdp;
714	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
715	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
716
717	FILEDESC_XLOCK(fdp);
718	TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
719	FILEDESC_XUNLOCK(fdp);
720
721	finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
722	fdrop(fp, td);
723
724	td->td_retval[0] = fd;
725done2:
726	return (error);
727}
728
729#ifndef _SYS_SYSPROTO_H_
730struct kevent_args {
731	int	fd;
732	const struct kevent *changelist;
733	int	nchanges;
734	struct	kevent *eventlist;
735	int	nevents;
736	const struct timespec *timeout;
737};
738#endif
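
/*
 * Example (sketch): the userland calling sequence these syscalls implement.
 * The descriptor "fd" and the error handling are illustrative only.
 *
 *	struct kevent ev;
 *	int kq, n;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *	n = kevent(kq, NULL, 0, &ev, 1, NULL);
 *
 * The last call sleeps in kqueue_scan() until fd becomes readable (or an
 * optional timeout expires).
 */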
739int
740sys_kevent(struct thread *td, struct kevent_args *uap)
741{
742	struct timespec ts, *tsp;
743	struct kevent_copyops k_ops = { uap,
744					kevent_copyout,
745					kevent_copyin};
746	int error;
747#ifdef KTRACE
748	struct uio ktruio;
749	struct iovec ktriov;
750	struct uio *ktruioin = NULL;
751	struct uio *ktruioout = NULL;
752#endif
753
754	if (uap->timeout != NULL) {
755		error = copyin(uap->timeout, &ts, sizeof(ts));
756		if (error)
757			return (error);
758		tsp = &ts;
759	} else
760		tsp = NULL;
761
762#ifdef KTRACE
763	if (KTRPOINT(td, KTR_GENIO)) {
764		ktriov.iov_base = uap->changelist;
765		ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
766		ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
767		    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
768		    .uio_td = td };
769		ktruioin = cloneuio(&ktruio);
770		ktriov.iov_base = uap->eventlist;
771		ktriov.iov_len = uap->nevents * sizeof(struct kevent);
772		ktruioout = cloneuio(&ktruio);
773	}
774#endif
775
776	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
777	    &k_ops, tsp);
778
779#ifdef KTRACE
780	if (ktruioin != NULL) {
781		ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
782		ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
783		ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
784		ktrgenio(uap->fd, UIO_READ, ktruioout, error);
785	}
786#endif
787
788	return (error);
789}
790
791/*
792 * Copy 'count' items into the destination list pointed to by uap->eventlist.
793 */
794static int
795kevent_copyout(void *arg, struct kevent *kevp, int count)
796{
797	struct kevent_args *uap;
798	int error;
799
800	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
801	uap = (struct kevent_args *)arg;
802
803	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
804	if (error == 0)
805		uap->eventlist += count;
806	return (error);
807}
808
809/*
810 * Copy 'count' items from the list pointed to by uap->changelist.
811 */
812static int
813kevent_copyin(void *arg, struct kevent *kevp, int count)
814{
815	struct kevent_args *uap;
816	int error;
817
818	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
819	uap = (struct kevent_args *)arg;
820
821	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
822	if (error == 0)
823		uap->changelist += count;
824	return (error);
825}
826
827int
828kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
829    struct kevent_copyops *k_ops, const struct timespec *timeout)
830{
831	struct kevent keva[KQ_NEVENTS];
832	struct kevent *kevp, *changes;
833	struct kqueue *kq;
834	struct file *fp;
835	cap_rights_t rights;
836	int i, n, nerrors, error;
837
838	cap_rights_init(&rights);
839	if (nchanges > 0)
840		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
841	if (nevents > 0)
842		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
843	error = fget(td, fd, &rights, &fp);
844	if (error != 0)
845		return (error);
846
847	error = kqueue_acquire(fp, &kq);
848	if (error != 0)
849		goto done_norel;
850
851	nerrors = 0;
852
853	while (nchanges > 0) {
854		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
855		error = k_ops->k_copyin(k_ops->arg, keva, n);
856		if (error)
857			goto done;
858		changes = keva;
859		for (i = 0; i < n; i++) {
860			kevp = &changes[i];
861			if (!kevp->filter)
862				continue;
863			kevp->flags &= ~EV_SYSFLAGS;
864			error = kqueue_register(kq, kevp, td, 1);
865			if (error || (kevp->flags & EV_RECEIPT)) {
866				if (nevents != 0) {
867					kevp->flags = EV_ERROR;
868					kevp->data = error;
869					(void) k_ops->k_copyout(k_ops->arg,
870					    kevp, 1);
871					nevents--;
872					nerrors++;
873				} else {
874					goto done;
875				}
876			}
877		}
878		nchanges -= n;
879	}
880	if (nerrors) {
881		td->td_retval[0] = nerrors;
882		error = 0;
883		goto done;
884	}
885
886	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
887done:
888	kqueue_release(kq, 0);
889done_norel:
890	fdrop(fp, td);
891	return (error);
892}
893
894int
895kqueue_add_filteropts(int filt, struct filterops *filtops)
896{
897	int error;
898
899	error = 0;
900	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
901		printf(
902"trying to add a filterop that is out of range: %d is beyond %d\n",
903		    ~filt, EVFILT_SYSCOUNT);
904		return EINVAL;
905	}
906	mtx_lock(&filterops_lock);
907	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
908	    sysfilt_ops[~filt].for_fop != NULL)
909		error = EEXIST;
910	else {
911		sysfilt_ops[~filt].for_fop = filtops;
912		sysfilt_ops[~filt].for_refcnt = 0;
913	}
914	mtx_unlock(&filterops_lock);
915
916	return (error);
917}
918
919int
920kqueue_del_filteropts(int filt)
921{
922	int error;
923
924	error = 0;
925	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
926		return EINVAL;
927
928	mtx_lock(&filterops_lock);
929	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
930	    sysfilt_ops[~filt].for_fop == NULL)
931		error = EINVAL;
932	else if (sysfilt_ops[~filt].for_refcnt != 0)
933		error = EBUSY;
934	else {
935		sysfilt_ops[~filt].for_fop = &null_filtops;
936		sysfilt_ops[~filt].for_refcnt = 0;
937	}
938	mtx_unlock(&filterops_lock);
939
940	return error;
941}
942
943static struct filterops *
944kqueue_fo_find(int filt)
945{
946
947	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
948		return NULL;
949
950	mtx_lock(&filterops_lock);
951	sysfilt_ops[~filt].for_refcnt++;
952	if (sysfilt_ops[~filt].for_fop == NULL)
953		sysfilt_ops[~filt].for_fop = &null_filtops;
954	mtx_unlock(&filterops_lock);
955
956	return sysfilt_ops[~filt].for_fop;
957}
958
959static void
960kqueue_fo_release(int filt)
961{
962
963	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
964		return;
965
966	mtx_lock(&filterops_lock);
967	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
968	    ("filter object refcount not valid on release"));
969	sysfilt_ops[~filt].for_refcnt--;
970	mtx_unlock(&filterops_lock);
971}
972
973/*
974 * A reference to kq (obtained via kqueue_acquire) must be held.  waitok
975 * controls whether memory allocation may wait; make sure it is 0 if you
976 * hold any mutexes.
977 */
978static int
979kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
980{
981	struct filterops *fops;
982	struct file *fp;
983	struct knote *kn, *tkn;
984	cap_rights_t rights;
985	int error, filt, event;
986	int haskqglobal, filedesc_unlock;
987
988	fp = NULL;
989	kn = NULL;
990	error = 0;
991	haskqglobal = 0;
992	filedesc_unlock = 0;
993
994	filt = kev->filter;
995	fops = kqueue_fo_find(filt);
996	if (fops == NULL)
997		return EINVAL;
998
999	tkn = knote_alloc(waitok);		/* prevent waiting with locks */
1000
1001findkn:
1002	if (fops->f_isfd) {
1003		KASSERT(td != NULL, ("td is NULL"));
1004		error = fget(td, kev->ident,
1005		    cap_rights_init(&rights, CAP_EVENT), &fp);
1006		if (error)
1007			goto done;
1008
1009		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1010		    kev->ident, 0) != 0) {
1011			/* try again */
1012			fdrop(fp, td);
1013			fp = NULL;
1014			error = kqueue_expand(kq, fops, kev->ident, waitok);
1015			if (error)
1016				goto done;
1017			goto findkn;
1018		}
1019
1020		if (fp->f_type == DTYPE_KQUEUE) {
1021			/*
1022			 * if we add some intelligence about what we are doing,
1023			 * we should be able to support events on ourselves.
1024			 * We need to know when we are doing this to prevent
1025			 * getting both the knlist lock and the kq lock since
1026			 * they are the same thing.
1027			 */
1028			if (fp->f_data == kq) {
1029				error = EINVAL;
1030				goto done;
1031			}
1032
1033			/*
1034			 * Pre-lock the filedesc before the global
1035			 * lock mutex, see the comment in
1036			 * kqueue_close().
1037			 */
1038			FILEDESC_XLOCK(td->td_proc->p_fd);
1039			filedesc_unlock = 1;
1040			KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1041		}
1042
1043		KQ_LOCK(kq);
1044		if (kev->ident < kq->kq_knlistsize) {
1045			SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1046				if (kev->filter == kn->kn_filter)
1047					break;
1048		}
1049	} else {
1050		if ((kev->flags & EV_ADD) == EV_ADD)
1051			kqueue_expand(kq, fops, kev->ident, waitok);
1052
1053		KQ_LOCK(kq);
1054		if (kq->kq_knhashmask != 0) {
1055			struct klist *list;
1056
1057			list = &kq->kq_knhash[
1058			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1059			SLIST_FOREACH(kn, list, kn_link)
1060				if (kev->ident == kn->kn_id &&
1061				    kev->filter == kn->kn_filter)
1062					break;
1063		}
1064	}
1065
1066	/* The knote is in the process of changing; wait for it to stabilize. */
1067	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1068		KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1069		if (filedesc_unlock) {
1070			FILEDESC_XUNLOCK(td->td_proc->p_fd);
1071			filedesc_unlock = 0;
1072		}
1073		kq->kq_state |= KQ_FLUXWAIT;
1074		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1075		if (fp != NULL) {
1076			fdrop(fp, td);
1077			fp = NULL;
1078		}
1079		goto findkn;
1080	}
1081
1082	/*
1083	 * kn now contains the matching knote, or NULL if no match
1084	 */
1085	if (kn == NULL) {
1086		if (kev->flags & EV_ADD) {
1087			kn = tkn;
1088			tkn = NULL;
1089			if (kn == NULL) {
1090				KQ_UNLOCK(kq);
1091				error = ENOMEM;
1092				goto done;
1093			}
1094			kn->kn_fp = fp;
1095			kn->kn_kq = kq;
1096			kn->kn_fop = fops;
1097			/*
1098			 * apply reference counts to knote structure, and
1099			 * do not release it at the end of this routine.
1100			 */
1101			fops = NULL;
1102			fp = NULL;
1103
1104			kn->kn_sfflags = kev->fflags;
1105			kn->kn_sdata = kev->data;
1106			kev->fflags = 0;
1107			kev->data = 0;
1108			kn->kn_kevent = *kev;
1109			kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1110			    EV_ENABLE | EV_DISABLE);
1111			kn->kn_status = KN_INFLUX|KN_DETACHED;
1112
1113			error = knote_attach(kn, kq);
1114			KQ_UNLOCK(kq);
1115			if (error != 0) {
1116				tkn = kn;
1117				goto done;
1118			}
1119
1120			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1121				knote_drop(kn, td);
1122				goto done;
1123			}
1124			KN_LIST_LOCK(kn);
1125			goto done_ev_add;
1126		} else {
1127			/* No matching knote and the EV_ADD flag is not set. */
1128			KQ_UNLOCK(kq);
1129			error = ENOENT;
1130			goto done;
1131		}
1132	}
1133
1134	if (kev->flags & EV_DELETE) {
1135		kn->kn_status |= KN_INFLUX;
1136		KQ_UNLOCK(kq);
1137		if (!(kn->kn_status & KN_DETACHED))
1138			kn->kn_fop->f_detach(kn);
1139		knote_drop(kn, td);
1140		goto done;
1141	}
1142
1143	/*
1144	 * The user may change some filter values after the initial EV_ADD,
1145	 * but doing so will not reset any filter which has already been
1146	 * triggered.
1147	 */
1148	kn->kn_status |= KN_INFLUX;
1149	KQ_UNLOCK(kq);
1150	KN_LIST_LOCK(kn);
1151	kn->kn_kevent.udata = kev->udata;
1152	if (!fops->f_isfd && fops->f_touch != NULL) {
1153		fops->f_touch(kn, kev, EVENT_REGISTER);
1154	} else {
1155		kn->kn_sfflags = kev->fflags;
1156		kn->kn_sdata = kev->data;
1157	}
1158
1159	/*
1160	 * We can get here with kn->kn_knlist == NULL.  This can happen when
1161	 * the initial attach event decides that the event is "completed"
1162	 * already, e.g. when filt_procattach is called on a zombie process.  It
1163	 * will call filt_proc, which will remove the knote from the list and
1164	 * NULL out kn_knlist.
1165	 */
1166done_ev_add:
1167	event = kn->kn_fop->f_event(kn, 0);
1168	KQ_LOCK(kq);
1169	if (event)
1170		KNOTE_ACTIVATE(kn, 1);
1171	kn->kn_status &= ~KN_INFLUX;
1172	KN_LIST_UNLOCK(kn);
1173
1174	if ((kev->flags & EV_DISABLE) &&
1175	    ((kn->kn_status & KN_DISABLED) == 0)) {
1176		kn->kn_status |= KN_DISABLED;
1177	}
1178
1179	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
1180		kn->kn_status &= ~KN_DISABLED;
1181		if ((kn->kn_status & KN_ACTIVE) &&
1182		    ((kn->kn_status & KN_QUEUED) == 0))
1183			knote_enqueue(kn);
1184	}
1185	KQ_UNLOCK_FLUX(kq);
1186
1187done:
1188	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1189	if (filedesc_unlock)
1190		FILEDESC_XUNLOCK(td->td_proc->p_fd);
1191	if (fp != NULL)
1192		fdrop(fp, td);
1193	if (tkn != NULL)
1194		knote_free(tkn);
1195	if (fops != NULL)
1196		kqueue_fo_release(filt);
1197	return (error);
1198}
1199
1200static int
1201kqueue_acquire(struct file *fp, struct kqueue **kqp)
1202{
1203	int error;
1204	struct kqueue *kq;
1205
1206	error = 0;
1207
1208	kq = fp->f_data;
1209	if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1210		return (EBADF);
1211	*kqp = kq;
1212	KQ_LOCK(kq);
1213	if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1214		KQ_UNLOCK(kq);
1215		return (EBADF);
1216	}
1217	kq->kq_refcnt++;
1218	KQ_UNLOCK(kq);
1219
1220	return error;
1221}
1222
1223static void
1224kqueue_release(struct kqueue *kq, int locked)
1225{
1226	if (locked)
1227		KQ_OWNED(kq);
1228	else
1229		KQ_LOCK(kq);
1230	kq->kq_refcnt--;
1231	if (kq->kq_refcnt == 1)
1232		wakeup(&kq->kq_refcnt);
1233	if (!locked)
1234		KQ_UNLOCK(kq);
1235}
1236
1237static void
1238kqueue_schedtask(struct kqueue *kq)
1239{
1240
1241	KQ_OWNED(kq);
1242	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1243	    ("scheduling kqueue task while draining"));
1244
1245	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1246		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
1247		kq->kq_state |= KQ_TASKSCHED;
1248	}
1249}
1250
1251/*
1252 * Expand the kq to make sure we have storage for the fops/ident pair.
1253 *
1254 * Return 0 on success (or if no work is necessary); return errno on failure.
1255 *
1256 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
1257 * If kqueue_register is called from a non-fd context, there usually should
1258 * be no locks held.
1259 */
1260static int
1261kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1262	int waitok)
1263{
1264	struct klist *list, *tmp_knhash, *to_free;
1265	u_long tmp_knhashmask;
1266	int size;
1267	int fd;
1268	int mflag = waitok ? M_WAITOK : M_NOWAIT;
1269
1270	KQ_NOTOWNED(kq);
1271
1272	to_free = NULL;
1273	if (fops->f_isfd) {
1274		fd = ident;
1275		if (kq->kq_knlistsize <= fd) {
1276			size = kq->kq_knlistsize;
1277			while (size <= fd)
1278				size += KQEXTENT;
1279			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
1280			if (list == NULL)
1281				return ENOMEM;
1282			KQ_LOCK(kq);
1283			if (kq->kq_knlistsize > fd) {
1284				to_free = list;
1285				list = NULL;
1286			} else {
1287				if (kq->kq_knlist != NULL) {
1288					bcopy(kq->kq_knlist, list,
1289					    kq->kq_knlistsize * sizeof(*list));
1290					to_free = kq->kq_knlist;
1291					kq->kq_knlist = NULL;
1292				}
1293				bzero((caddr_t)list +
1294				    kq->kq_knlistsize * sizeof(*list),
1295				    (size - kq->kq_knlistsize) * sizeof(*list));
1296				kq->kq_knlistsize = size;
1297				kq->kq_knlist = list;
1298			}
1299			KQ_UNLOCK(kq);
1300		}
1301	} else {
1302		if (kq->kq_knhashmask == 0) {
1303			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
1304			    &tmp_knhashmask);
1305			if (tmp_knhash == NULL)
1306				return ENOMEM;
1307			KQ_LOCK(kq);
1308			if (kq->kq_knhashmask == 0) {
1309				kq->kq_knhash = tmp_knhash;
1310				kq->kq_knhashmask = tmp_knhashmask;
1311			} else {
1312				to_free = tmp_knhash;
1313			}
1314			KQ_UNLOCK(kq);
1315		}
1316	}
1317	free(to_free, M_KQUEUE);
1318
1319	KQ_NOTOWNED(kq);
1320	return 0;
1321}
1322
1323static void
1324kqueue_task(void *arg, int pending)
1325{
1326	struct kqueue *kq;
1327	int haskqglobal;
1328
1329	haskqglobal = 0;
1330	kq = arg;
1331
1332	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1333	KQ_LOCK(kq);
1334
1335	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1336
1337	kq->kq_state &= ~KQ_TASKSCHED;
1338	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1339		wakeup(&kq->kq_state);
1340	}
1341	KQ_UNLOCK(kq);
1342	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1343}
1344
1345/*
1346 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1347 * We treat KN_MARKER knotes as if they are INFLUX.
1348 */
1349static int
1350kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1351    const struct timespec *tsp, struct kevent *keva, struct thread *td)
1352{
1353	struct kevent *kevp;
1354	struct knote *kn, *marker;
1355	sbintime_t asbt, rsbt;
1356	int count, error, haskqglobal, influx, nkev, touch;
1357
1358	count = maxevents;
1359	nkev = 0;
1360	error = 0;
1361	haskqglobal = 0;
1362
1363	if (maxevents == 0)
1364		goto done_nl;
1365
1366	rsbt = 0;
1367	if (tsp != NULL) {
1368		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
1369		    tsp->tv_nsec >= 1000000000) {
1370			error = EINVAL;
1371			goto done_nl;
1372		}
1373		if (timespecisset(tsp)) {
1374			if (tsp->tv_sec <= INT32_MAX) {
1375				rsbt = tstosbt(*tsp);
1376				if (TIMESEL(&asbt, rsbt))
1377					asbt += tc_tick_sbt;
1378				if (asbt <= INT64_MAX - rsbt)
1379					asbt += rsbt;
1380				else
1381					asbt = 0;
1382				rsbt >>= tc_precexp;
1383			} else
1384				asbt = 0;
1385		} else
1386			asbt = -1;
1387	} else
1388		asbt = 0;
1389	marker = knote_alloc(1);
1390	if (marker == NULL) {
1391		error = ENOMEM;
1392		goto done_nl;
1393	}
1394	marker->kn_status = KN_MARKER;
1395	KQ_LOCK(kq);
1396
1397retry:
1398	kevp = keva;
1399	if (kq->kq_count == 0) {
1400		if (asbt == -1) {
1401			error = EWOULDBLOCK;
1402		} else {
1403			kq->kq_state |= KQ_SLEEP;
1404			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1405			    "kqread", asbt, rsbt, C_ABSOLUTE);
1406		}
1407		if (error == 0)
1408			goto retry;
1409		/* don't restart after signals... */
1410		if (error == ERESTART)
1411			error = EINTR;
1412		else if (error == EWOULDBLOCK)
1413			error = 0;
1414		goto done;
1415	}
1416
1417	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1418	influx = 0;
1419	while (count) {
1420		KQ_OWNED(kq);
1421		kn = TAILQ_FIRST(&kq->kq_head);
1422
1423		if ((kn->kn_status == KN_MARKER && kn != marker) ||
1424		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1425			if (influx) {
1426				influx = 0;
1427				KQ_FLUX_WAKEUP(kq);
1428			}
1429			kq->kq_state |= KQ_FLUXWAIT;
1430			error = msleep(kq, &kq->kq_lock, PSOCK,
1431			    "kqflxwt", 0);
1432			continue;
1433		}
1434
1435		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1436		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1437			kn->kn_status &= ~KN_QUEUED;
1438			kq->kq_count--;
1439			continue;
1440		}
1441		if (kn == marker) {
1442			KQ_FLUX_WAKEUP(kq);
1443			if (count == maxevents)
1444				goto retry;
1445			goto done;
1446		}
1447		KASSERT((kn->kn_status & KN_INFLUX) == 0,
1448	    ("KN_INFLUX set when not supposed to be"));
1449
1450		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
1451			kn->kn_status &= ~KN_QUEUED;
1452			kn->kn_status |= KN_INFLUX;
1453			kq->kq_count--;
1454			KQ_UNLOCK(kq);
1455			/*
1456			 * We don't need to lock the list since we've marked
1457			 * it _INFLUX.
1458			 */
1459			if (!(kn->kn_status & KN_DETACHED))
1460				kn->kn_fop->f_detach(kn);
1461			knote_drop(kn, td);
1462			KQ_LOCK(kq);
1463			continue;
1464		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
1465			kn->kn_status &= ~KN_QUEUED;
1466			kn->kn_status |= KN_INFLUX;
1467			kq->kq_count--;
1468			KQ_UNLOCK(kq);
1469			/*
1470			 * We don't need to lock the list since we've marked
1471			 * it _INFLUX.
1472			 */
1473			*kevp = kn->kn_kevent;
1474			if (!(kn->kn_status & KN_DETACHED))
1475				kn->kn_fop->f_detach(kn);
1476			knote_drop(kn, td);
1477			KQ_LOCK(kq);
1478			kn = NULL;
1479		} else {
1480			kn->kn_status |= KN_INFLUX;
1481			KQ_UNLOCK(kq);
1482			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
1483				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1484			KN_LIST_LOCK(kn);
1485			if (kn->kn_fop->f_event(kn, 0) == 0) {
1486				KQ_LOCK(kq);
1487				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1488				kn->kn_status &=
1489				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
1490				kq->kq_count--;
1491				KN_LIST_UNLOCK(kn);
1492				influx = 1;
1493				continue;
1494			}
1495			touch = (!kn->kn_fop->f_isfd &&
1496			    kn->kn_fop->f_touch != NULL);
1497			if (touch)
1498				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
1499			else
1500				*kevp = kn->kn_kevent;
1501			KQ_LOCK(kq);
1502			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1503			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
1504				/*
1505				 * Manually clear knotes that were not
1506				 * touched.
1507				 */
1508				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
1509					kn->kn_data = 0;
1510					kn->kn_fflags = 0;
1511				}
1512				if (kn->kn_flags & EV_DISPATCH)
1513					kn->kn_status |= KN_DISABLED;
1514				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
1515				kq->kq_count--;
1516			} else
1517				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1518
1519			kn->kn_status &= ~(KN_INFLUX);
1520			KN_LIST_UNLOCK(kn);
1521			influx = 1;
1522		}
1523
1524		/* we are returning a copy to the user */
1525		kevp++;
1526		nkev++;
1527		count--;
1528
1529		if (nkev == KQ_NEVENTS) {
1530			influx = 0;
1531			KQ_UNLOCK_FLUX(kq);
1532			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1533			nkev = 0;
1534			kevp = keva;
1535			KQ_LOCK(kq);
1536			if (error)
1537				break;
1538		}
1539	}
1540	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1541done:
1542	KQ_OWNED(kq);
1543	KQ_UNLOCK_FLUX(kq);
1544	knote_free(marker);
1545done_nl:
1546	KQ_NOTOWNED(kq);
1547	if (nkev != 0)
1548		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
1549	td->td_retval[0] = maxevents - count;
1550	return (error);
1551}
1552
1553/*
1554 * XXX
1555 * This could be expanded to call kqueue_scan, if desired.
1556 */
1557/*ARGSUSED*/
1558static int
1559kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
1560	int flags, struct thread *td)
1561{
1562	return (ENXIO);
1563}
1564
1565/*ARGSUSED*/
1566static int
1567kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
1568	 int flags, struct thread *td)
1569{
1570	return (ENXIO);
1571}
1572
1573/*ARGSUSED*/
1574static int
1575kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
1576	struct thread *td)
1577{
1578
1579	return (EINVAL);
1580}
1581
1582/*ARGSUSED*/
1583static int
1584kqueue_ioctl(struct file *fp, u_long cmd, void *data,
1585	struct ucred *active_cred, struct thread *td)
1586{
1587	/*
1588	 * Enabling sigio causes two major problems:
1589	 * 1) infinite recursion:
1590	 * Synopsis: kevent is being used to track signals and has FIOASYNC
1591	 * set.  On receipt of a signal this will cause a kqueue to recurse
1592	 * into itself over and over.  Sending the sigio causes the kqueue
1593	 * to become ready, which in turn posts sigio again, forever.
1594	 * Solution: this can be solved by setting a flag in the kqueue that
1595	 * we have a SIGIO in progress.
1596	 * 2) locking problems:
1597	 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
1598	 * us above the proc and pgrp locks.
1599	 * Solution: Post a signal using an async mechanism, being sure to
1600	 * record a generation count in the delivery so that we do not deliver
1601	 * a signal to the wrong process.
1602	 *
1603	 * Note, these two mechanisms are somewhat mutually exclusive!
1604	 */
1605#if 0
1606	struct kqueue *kq;
1607
1608	kq = fp->f_data;
1609	switch (cmd) {
1610	case FIOASYNC:
1611		if (*(int *)data) {
1612			kq->kq_state |= KQ_ASYNC;
1613		} else {
1614			kq->kq_state &= ~KQ_ASYNC;
1615		}
1616		return (0);
1617
1618	case FIOSETOWN:
1619		return (fsetown(*(int *)data, &kq->kq_sigio));
1620
1621	case FIOGETOWN:
1622		*(int *)data = fgetown(&kq->kq_sigio);
1623		return (0);
1624	}
1625#endif
1626
1627	return (ENOTTY);
1628}
1629
1630/*ARGSUSED*/
1631static int
1632kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
1633	struct thread *td)
1634{
1635	struct kqueue *kq;
1636	int revents = 0;
1637	int error;
1638
1639	if ((error = kqueue_acquire(fp, &kq)))
1640		return POLLERR;
1641
1642	KQ_LOCK(kq);
1643	if (events & (POLLIN | POLLRDNORM)) {
1644		if (kq->kq_count) {
1645			revents |= events & (POLLIN | POLLRDNORM);
1646		} else {
1647			selrecord(td, &kq->kq_sel);
1648			if (SEL_WAITING(&kq->kq_sel))
1649				kq->kq_state |= KQ_SEL;
1650		}
1651	}
1652	kqueue_release(kq, 1);
1653	KQ_UNLOCK(kq);
1654	return (revents);
1655}
1656
1657/*ARGSUSED*/
1658static int
1659kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
1660	struct thread *td)
1661{
1662
1663	bzero((void *)st, sizeof *st);
1664	/*
1665	 * We no longer return kq_count because the unlocked value is useless.
1666	 * If you spent all this time getting the count, why not spend your
1667	 * syscall better by calling kevent?
1668	 *
1669	 * XXX - This is needed for libc_r.
1670	 */
1671	st->st_mode = S_IFIFO;
1672	return (0);
1673}
1674
1675/*ARGSUSED*/
1676static int
1677kqueue_close(struct file *fp, struct thread *td)
1678{
1679	struct kqueue *kq = fp->f_data;
1680	struct filedesc *fdp;
1681	struct knote *kn;
1682	int i;
1683	int error;
1684	int filedesc_unlock;
1685
1686	if ((error = kqueue_acquire(fp, &kq)))
1687		return error;
1688
1689	filedesc_unlock = 0;
1690	KQ_LOCK(kq);
1691
1692	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1693	    ("kqueue already closing"));
1694	kq->kq_state |= KQ_CLOSING;
1695	if (kq->kq_refcnt > 1)
1696		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1697
1698	KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1699	fdp = kq->kq_fdp;
1700
1701	KASSERT(knlist_empty(&kq->kq_sel.si_note),
1702	    ("kqueue's knlist not empty"));
1703
1704	for (i = 0; i < kq->kq_knlistsize; i++) {
1705		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1706			if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1707				kq->kq_state |= KQ_FLUXWAIT;
1708				msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
1709				continue;
1710			}
1711			kn->kn_status |= KN_INFLUX;
1712			KQ_UNLOCK(kq);
1713			if (!(kn->kn_status & KN_DETACHED))
1714				kn->kn_fop->f_detach(kn);
1715			knote_drop(kn, td);
1716			KQ_LOCK(kq);
1717		}
1718	}
1719	if (kq->kq_knhashmask != 0) {
1720		for (i = 0; i <= kq->kq_knhashmask; i++) {
1721			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
1722				if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1723					kq->kq_state |= KQ_FLUXWAIT;
1724					msleep(kq, &kq->kq_lock, PSOCK,
1725					       "kqclo2", 0);
1726					continue;
1727				}
1728				kn->kn_status |= KN_INFLUX;
1729				KQ_UNLOCK(kq);
1730				if (!(kn->kn_status & KN_DETACHED))
1731					kn->kn_fop->f_detach(kn);
1732				knote_drop(kn, td);
1733				KQ_LOCK(kq);
1734			}
1735		}
1736	}
1737
1738	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
1739		kq->kq_state |= KQ_TASKDRAIN;
1740		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
1741	}
1742
1743	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1744		selwakeuppri(&kq->kq_sel, PSOCK);
1745		if (!SEL_WAITING(&kq->kq_sel))
1746			kq->kq_state &= ~KQ_SEL;
1747	}
1748
1749	KQ_UNLOCK(kq);
1750
1751	/*
1752	 * We could be called due to knote_drop() doing fdrop(),
1753	 * called from kqueue_register().  In this case the global
1754	 * lock is owned, and the filedesc sx is locked beforehand, so
1755	 * that the sleepable lock is not taken after the non-sleepable one.
1756	 */
1757	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
1758		FILEDESC_XLOCK(fdp);
1759		filedesc_unlock = 1;
1760	} else
1761		filedesc_unlock = 0;
1762	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
1763	if (filedesc_unlock)
1764		FILEDESC_XUNLOCK(fdp);
1765
1766	seldrain(&kq->kq_sel);
1767	knlist_destroy(&kq->kq_sel.si_note);
1768	mtx_destroy(&kq->kq_lock);
1769	kq->kq_fdp = NULL;
1770
1771	if (kq->kq_knhash != NULL)
1772		free(kq->kq_knhash, M_KQUEUE);
1773	if (kq->kq_knlist != NULL)
1774		free(kq->kq_knlist, M_KQUEUE);
1775
1776	funsetown(&kq->kq_sigio);
1777	free(kq, M_KQUEUE);
1778	fp->f_data = NULL;
1779
1780	return (0);
1781}
1782
1783static void
1784kqueue_wakeup(struct kqueue *kq)
1785{
1786	KQ_OWNED(kq);
1787
1788	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
1789		kq->kq_state &= ~KQ_SLEEP;
1790		wakeup(kq);
1791	}
1792	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1793		selwakeuppri(&kq->kq_sel, PSOCK);
1794		if (!SEL_WAITING(&kq->kq_sel))
1795			kq->kq_state &= ~KQ_SEL;
1796	}
1797	if (!knlist_empty(&kq->kq_sel.si_note))
1798		kqueue_schedtask(kq);
1799	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
1800		pgsigio(&kq->kq_sigio, SIGIO, 0);
1801	}
1802}
1803
1804/*
1805 * Walk down a list of knotes, activating them if their event has triggered.
1806 *
1807 * There is a possibility to optimize in the case of one kq watching another.
1808 * Instead of scheduling a task to wake it up, you could pass enough state
1809 * down the chain to wake up the parent kqueue.  Make this code functional
1810 * first.
1811 */
1812void
1813knote(struct knlist *list, long hint, int lockflags)
1814{
1815	struct kqueue *kq;
1816	struct knote *kn;
1817	int error;
1818
1819	if (list == NULL)
1820		return;
1821
1822	KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
1823
1824	if ((lockflags & KNF_LISTLOCKED) == 0)
1825		list->kl_lock(list->kl_lockarg);
1826
1827	/*
1828	 * If we unlock the list lock (and set KN_INFLUX), we can eliminate
1829	 * the kqueue scheduling, but this will introduce four
1830	 * lock/unlock pairs for each knote to test.  If we do, continue to use
1831	 * SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe in our case, as it is
1832	 * only safe if you want to remove the current item, which we are
1833	 * not doing.
1834	 */
1835	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
1836		kq = kn->kn_kq;
1837		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
1838			KQ_LOCK(kq);
1839			if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
1840				KQ_UNLOCK(kq);
1841			} else if ((lockflags & KNF_NOKQLOCK) != 0) {
1842				kn->kn_status |= KN_INFLUX;
1843				KQ_UNLOCK(kq);
1844				error = kn->kn_fop->f_event(kn, hint);
1845				KQ_LOCK(kq);
1846				kn->kn_status &= ~KN_INFLUX;
1847				if (error)
1848					KNOTE_ACTIVATE(kn, 1);
1849				KQ_UNLOCK_FLUX(kq);
1850			} else {
1851				kn->kn_status |= KN_HASKQLOCK;
1852				if (kn->kn_fop->f_event(kn, hint))
1853					KNOTE_ACTIVATE(kn, 1);
1854				kn->kn_status &= ~KN_HASKQLOCK;
1855				KQ_UNLOCK(kq);
1856			}
1857		}
1858		kq = NULL;
1859	}
1860	if ((lockflags & KNF_LISTLOCKED) == 0)
1861		list->kl_unlock(list->kl_lockarg);
1862}
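
/*
 * Example (sketch): the usual life cycle of a knlist in an event source.
 * The "sc" softc, its mutex and its field names are illustrative only.
 *
 *	knlist_init_mtx(&sc->sc_note, &sc->sc_mtx);	at attach time
 *	knlist_add(&sc->sc_note, kn, 0);		from the f_attach/kqfilter method
 *	knote(&sc->sc_note, 0, 0);			whenever an event occurs
 *	knlist_remove(&sc->sc_note, kn, 0);		from the f_detach method
 *	knlist_clear(&sc->sc_note, 0);			at detach time, then knlist_destroy()
 */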
1863
1864/*
1865 * add a knote to a knlist
1866 */
1867void
1868knlist_add(struct knlist *knl, struct knote *kn, int islocked)
1869{
1870	KNL_ASSERT_LOCK(knl, islocked);
1871	KQ_NOTOWNED(kn->kn_kq);
1872	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
1873	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
1874	if (!islocked)
1875		knl->kl_lock(knl->kl_lockarg);
1876	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
1877	if (!islocked)
1878		knl->kl_unlock(knl->kl_lockarg);
1879	KQ_LOCK(kn->kn_kq);
1880	kn->kn_knlist = knl;
1881	kn->kn_status &= ~KN_DETACHED;
1882	KQ_UNLOCK(kn->kn_kq);
1883}
1884
1885static void
1886knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
1887{
1888	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
1889	KNL_ASSERT_LOCK(knl, knlislocked);
1890	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
1891	if (!kqislocked)
1892		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
1893    ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
1894	if (!knlislocked)
1895		knl->kl_lock(knl->kl_lockarg);
1896	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
1897	kn->kn_knlist = NULL;
1898	if (!knlislocked)
1899		knl->kl_unlock(knl->kl_lockarg);
1900	if (!kqislocked)
1901		KQ_LOCK(kn->kn_kq);
1902	kn->kn_status |= KN_DETACHED;
1903	if (!kqislocked)
1904		KQ_UNLOCK(kn->kn_kq);
1905}
1906
1907/*
1908 * remove knote from the specified knlist
1909 */
1910void
1911knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
1912{
1913
1914	knlist_remove_kq(knl, kn, islocked, 0);
1915}
1916
1917/*
1918 * remove knote from the specified knlist while in f_event handler.
1919 */
1920void
1921knlist_remove_inevent(struct knlist *knl, struct knote *kn)
1922{
1923
1924	knlist_remove_kq(knl, kn, 1,
1925	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
1926}
1927
1928int
1929knlist_empty(struct knlist *knl)
1930{
1931
1932	KNL_ASSERT_LOCKED(knl);
1933	return SLIST_EMPTY(&knl->kl_list);
1934}
1935
1936static struct mtx	knlist_lock;
1937MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
1938	MTX_DEF);
1939static void knlist_mtx_lock(void *arg);
1940static void knlist_mtx_unlock(void *arg);
1941
1942static void
1943knlist_mtx_lock(void *arg)
1944{
1945
1946	mtx_lock((struct mtx *)arg);
1947}
1948
1949static void
1950knlist_mtx_unlock(void *arg)
1951{
1952
1953	mtx_unlock((struct mtx *)arg);
1954}
1955
1956static void
1957knlist_mtx_assert_locked(void *arg)
1958{
1959
1960	mtx_assert((struct mtx *)arg, MA_OWNED);
1961}
1962
1963static void
1964knlist_mtx_assert_unlocked(void *arg)
1965{
1966
1967	mtx_assert((struct mtx *)arg, MA_NOTOWNED);
1968}
1969
1970static void
1971knlist_rw_rlock(void *arg)
1972{
1973
1974	rw_rlock((struct rwlock *)arg);
1975}
1976
1977static void
1978knlist_rw_runlock(void *arg)
1979{
1980
1981	rw_runlock((struct rwlock *)arg);
1982}
1983
1984static void
1985knlist_rw_assert_locked(void *arg)
1986{
1987
1988	rw_assert((struct rwlock *)arg, RA_LOCKED);
1989}
1990
1991static void
1992knlist_rw_assert_unlocked(void *arg)
1993{
1994
1995	rw_assert((struct rwlock *)arg, RA_UNLOCKED);
1996}
1997
1998void
1999knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2000    void (*kl_unlock)(void *),
2001    void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
2002{
2003
2004	if (lock == NULL)
2005		knl->kl_lockarg = &knlist_lock;
2006	else
2007		knl->kl_lockarg = lock;
2008
2009	if (kl_lock == NULL)
2010		knl->kl_lock = knlist_mtx_lock;
2011	else
2012		knl->kl_lock = kl_lock;
2013	if (kl_unlock == NULL)
2014		knl->kl_unlock = knlist_mtx_unlock;
2015	else
2016		knl->kl_unlock = kl_unlock;
2017	if (kl_assert_locked == NULL)
2018		knl->kl_assert_locked = knlist_mtx_assert_locked;
2019	else
2020		knl->kl_assert_locked = kl_assert_locked;
2021	if (kl_assert_unlocked == NULL)
2022		knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
2023	else
2024		knl->kl_assert_unlocked = kl_assert_unlocked;
2025
2026	SLIST_INIT(&knl->kl_list);
2027}
2028
2029void
2030knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2031{
2032
2033	knlist_init(knl, lock, NULL, NULL, NULL, NULL);
2034}
2035
2036void
2037knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2038{
2039
2040	knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2041	    knlist_rw_assert_locked, knlist_rw_assert_unlocked);
2042}
2043
2044void
2045knlist_destroy(struct knlist *knl)
2046{
2047
2048#ifdef INVARIANTS
2049	/*
2050	 * if we run across this error, we need to find the offending
2051	 * driver and have it call knlist_clear or knlist_delete.
2052	 */
2053	if (!SLIST_EMPTY(&knl->kl_list))
2054		printf("WARNING: destroying knlist w/ knotes on it!\n");
2055#endif
2056
2057	knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
2058	SLIST_INIT(&knl->kl_list);
2059}
2060
2061/*
2062 * Even if we are locked, we may need to drop the lock to allow any influx
2063 * knotes time to "settle".
2064 */
2065void
2066knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2067{
2068	struct knote *kn, *kn2;
2069	struct kqueue *kq;
2070
2071	if (islocked)
2072		KNL_ASSERT_LOCKED(knl);
2073	else {
2074		KNL_ASSERT_UNLOCKED(knl);
2075again:		/* need to reacquire lock since we have dropped it */
2076		knl->kl_lock(knl->kl_lockarg);
2077	}
2078
2079	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2080		kq = kn->kn_kq;
2081		KQ_LOCK(kq);
2082		if ((kn->kn_status & KN_INFLUX)) {
2083			KQ_UNLOCK(kq);
2084			continue;
2085		}
2086		knlist_remove_kq(knl, kn, 1, 1);
2087		if (killkn) {
2088			kn->kn_status |= KN_INFLUX | KN_DETACHED;
2089			KQ_UNLOCK(kq);
2090			knote_drop(kn, td);
2091		} else {
2092			/* Make sure cleared knotes disappear soon */
2093			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
2094			KQ_UNLOCK(kq);
2095		}
2096		kq = NULL;
2097	}
2098
2099	if (!SLIST_EMPTY(&knl->kl_list)) {
2100		/* there are still KN_INFLUX remaining */
2101		kn = SLIST_FIRST(&knl->kl_list);
2102		kq = kn->kn_kq;
2103		KQ_LOCK(kq);
2104		KASSERT(kn->kn_status & KN_INFLUX,
2105		    ("knote removed w/o list lock"));
2106		knl->kl_unlock(knl->kl_lockarg);
2107		kq->kq_state |= KQ_FLUXWAIT;
2108		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2109		kq = NULL;
2110		goto again;
2111	}
2112
2113	if (islocked)
2114		KNL_ASSERT_LOCKED(knl);
2115	else {
2116		knl->kl_unlock(knl->kl_lockarg);
2117		KNL_ASSERT_UNLOCKED(knl);
2118	}
2119}
2120
2121/*
2122 * Remove all knotes referencing a specified fd.  Must be called with the
2123 * FILEDESC lock held.  This prevents a race where a new fd comes along and
2124 * occupies the entry and we attach a knote to the fd.
2125 */
2126void
2127knote_fdclose(struct thread *td, int fd)
2128{
2129	struct filedesc *fdp = td->td_proc->p_fd;
2130	struct kqueue *kq;
2131	struct knote *kn;
2132	int influx;
2133
2134	FILEDESC_XLOCK_ASSERT(fdp);
2135
2136	/*
2137	 * We shouldn't have to worry about new kevents appearing on fd
2138	 * since filedesc is locked.
2139	 */
2140	TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2141		KQ_LOCK(kq);
2142
2143again:
2144		influx = 0;
2145		while (kq->kq_knlistsize > fd &&
2146		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2147			if (kn->kn_status & KN_INFLUX) {
2148				/* someone else might be waiting on our knote */
2149				if (influx)
2150					wakeup(kq);
2151				kq->kq_state |= KQ_FLUXWAIT;
2152				msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2153				goto again;
2154			}
2155			kn->kn_status |= KN_INFLUX;
2156			KQ_UNLOCK(kq);
2157			if (!(kn->kn_status & KN_DETACHED))
2158				kn->kn_fop->f_detach(kn);
2159			knote_drop(kn, td);
2160			influx = 1;
2161			KQ_LOCK(kq);
2162		}
2163		KQ_UNLOCK_FLUX(kq);
2164	}
2165}
2166
2167static int
2168knote_attach(struct knote *kn, struct kqueue *kq)
2169{
2170	struct klist *list;
2171
2172	KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
2173	KQ_OWNED(kq);
2174
2175	if (kn->kn_fop->f_isfd) {
2176		if (kn->kn_id >= kq->kq_knlistsize)
2177			return ENOMEM;
2178		list = &kq->kq_knlist[kn->kn_id];
2179	} else {
2180		if (kq->kq_knhash == NULL)
2181			return ENOMEM;
2182		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2183	}
2184
2185	SLIST_INSERT_HEAD(list, kn, kn_link);
2186
2187	return 0;
2188}
2189
2190/*
2191 * The knote must already have been detached using the f_detach method.
2192 * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
2193 * to prevent removal by others.
2194 */
2195static void
2196knote_drop(struct knote *kn, struct thread *td)
2197{
2198	struct kqueue *kq;
2199	struct klist *list;
2200
2201	kq = kn->kn_kq;
2202
2203	KQ_NOTOWNED(kq);
2204	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
2205	    ("knote_drop called without KN_INFLUX set in kn_status"));
2206
2207	KQ_LOCK(kq);
2208	if (kn->kn_fop->f_isfd)
2209		list = &kq->kq_knlist[kn->kn_id];
2210	else
2211		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2212
2213	if (!SLIST_EMPTY(list))
2214		SLIST_REMOVE(list, kn, knote, kn_link);
2215	if (kn->kn_status & KN_QUEUED)
2216		knote_dequeue(kn);
2217	KQ_UNLOCK_FLUX(kq);
2218
2219	if (kn->kn_fop->f_isfd) {
2220		fdrop(kn->kn_fp, td);
2221		kn->kn_fp = NULL;
2222	}
2223	kqueue_fo_release(kn->kn_kevent.filter);
2224	kn->kn_fop = NULL;
2225	knote_free(kn);
2226}
2227
2228static void
2229knote_enqueue(struct knote *kn)
2230{
2231	struct kqueue *kq = kn->kn_kq;
2232
2233	KQ_OWNED(kn->kn_kq);
2234	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2235
2236	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2237	kn->kn_status |= KN_QUEUED;
2238	kq->kq_count++;
2239	kqueue_wakeup(kq);
2240}
2241
2242static void
2243knote_dequeue(struct knote *kn)
2244{
2245	struct kqueue *kq = kn->kn_kq;
2246
2247	KQ_OWNED(kn->kn_kq);
2248	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2249
2250	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2251	kn->kn_status &= ~KN_QUEUED;
2252	kq->kq_count--;
2253}
2254
2255static void
2256knote_init(void)
2257{
2258
2259	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2260	    NULL, NULL, UMA_ALIGN_PTR, 0);
2261}
2262SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2263
2264static struct knote *
2265knote_alloc(int waitok)
2266{
2267	return ((struct knote *)uma_zalloc(knote_zone,
2268	    (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
2269}
2270
2271static void
2272knote_free(struct knote *kn)
2273{
2274	if (kn != NULL)
2275		uma_zfree(knote_zone, kn);
2276}
2277
2278/*
2279 * Register the kev w/ the kq specified by fd.
2280 */
2281int
2282kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
2283{
2284	struct kqueue *kq;
2285	struct file *fp;
2286	cap_rights_t rights;
2287	int error;
2288
2289	error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
2290	if (error != 0)
2291		return (error);
2292	if ((error = kqueue_acquire(fp, &kq)) != 0)
2293		goto noacquire;
2294
2295	error = kqueue_register(kq, kev, td, waitok);
2296
2297	kqueue_release(kq, 0);
2298
2299noacquire:
2300	fdrop(fp, td);
2301
2302	return error;
2303}
2304