1171169Smlaier/*
2171169Smlaier * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
3171169Smlaier * All rights reserved.
4171169Smlaier *
5171169Smlaier * Redistribution and use in source and binary forms, with or without
6171169Smlaier * modification, are permitted provided that the following conditions
7171169Smlaier * are met:
8171169Smlaier * 1. Redistributions of source code must retain the above copyright
9171169Smlaier *    notice, this list of conditions and the following disclaimer.
10171169Smlaier * 2. Redistributions in binary form must reproduce the above copyright
11171169Smlaier *    notice, this list of conditions and the following disclaimer in the
12171169Smlaier *    documentation and/or other materials provided with the distribution.
13171169Smlaier * 3. The name of the author may not be used to endorse or promote products
14171169Smlaier *    derived from this software without specific prior written permission.
15171169Smlaier *
16171169Smlaier * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17171169Smlaier * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18171169Smlaier * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19171169Smlaier * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20171169Smlaier * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21171169Smlaier * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22171169Smlaier * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23171169Smlaier * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24171169Smlaier * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25171169Smlaier * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26171169Smlaier */
27171169Smlaier#ifdef HAVE_CONFIG_H
28171169Smlaier#include "config.h"
29171169Smlaier#endif
30171169Smlaier
31171169Smlaier#ifdef WIN32
32171169Smlaier#define WIN32_LEAN_AND_MEAN
33171169Smlaier#include <windows.h>
34171169Smlaier#undef WIN32_LEAN_AND_MEAN
35171169Smlaier#include "misc.h"
36171169Smlaier#endif
37171169Smlaier#include <sys/types.h>
38171169Smlaier#include <sys/tree.h>
39171169Smlaier#ifdef HAVE_SYS_TIME_H
40171169Smlaier#include <sys/time.h>
41171169Smlaier#else
42171169Smlaier#include <sys/_time.h>
43171169Smlaier#endif
44171169Smlaier#include <sys/queue.h>
45171169Smlaier#include <stdio.h>
46171169Smlaier#include <stdlib.h>
47171169Smlaier#ifndef WIN32
48171169Smlaier#include <unistd.h>
49171169Smlaier#endif
50171169Smlaier#include <errno.h>
51171169Smlaier#include <signal.h>
52171169Smlaier#include <string.h>
53171169Smlaier#include <assert.h>
54171169Smlaier
55171169Smlaier#include "event.h"
56171169Smlaier#include "event-internal.h"
57171169Smlaier#include "log.h"
58171169Smlaier
59171169Smlaier#ifdef HAVE_EVENT_PORTS
60171169Smlaierextern const struct eventop evportops;
61171169Smlaier#endif
62171169Smlaier#ifdef HAVE_SELECT
63171169Smlaierextern const struct eventop selectops;
64171169Smlaier#endif
65171169Smlaier#ifdef HAVE_POLL
66171169Smlaierextern const struct eventop pollops;
67171169Smlaier#endif
68171169Smlaier#ifdef HAVE_RTSIG
69171169Smlaierextern const struct eventop rtsigops;
70171169Smlaier#endif
71171169Smlaier#ifdef HAVE_EPOLL
72171169Smlaierextern const struct eventop epollops;
73171169Smlaier#endif
74171169Smlaier#ifdef HAVE_WORKING_KQUEUE
75171169Smlaierextern const struct eventop kqops;
76171169Smlaier#endif
77171169Smlaier#ifdef HAVE_DEVPOLL
78171169Smlaierextern const struct eventop devpollops;
79171169Smlaier#endif
80171169Smlaier#ifdef WIN32
81171169Smlaierextern const struct eventop win32ops;
82171169Smlaier#endif
83171169Smlaier
84171169Smlaier/* In order of preference */
85171169Smlaierconst struct eventop *eventops[] = {
86171169Smlaier#ifdef HAVE_EVENT_PORTS
87171169Smlaier	&evportops,
88171169Smlaier#endif
89171169Smlaier#ifdef HAVE_WORKING_KQUEUE
90171169Smlaier	&kqops,
91171169Smlaier#endif
92171169Smlaier#ifdef HAVE_EPOLL
93171169Smlaier	&epollops,
94171169Smlaier#endif
95171169Smlaier#ifdef HAVE_DEVPOLL
96171169Smlaier	&devpollops,
97171169Smlaier#endif
98171169Smlaier#ifdef HAVE_RTSIG
99171169Smlaier	&rtsigops,
100171169Smlaier#endif
101171169Smlaier#ifdef HAVE_POLL
102171169Smlaier	&pollops,
103171169Smlaier#endif
104171169Smlaier#ifdef HAVE_SELECT
105171169Smlaier	&selectops,
106171169Smlaier#endif
107171169Smlaier#ifdef WIN32
108171169Smlaier	&win32ops,
109171169Smlaier#endif
110171169Smlaier	NULL
111171169Smlaier};
112171169Smlaier
113171169Smlaier/* Global state */
114171169Smlaierstruct event_list signalqueue;
115171169Smlaier
116171169Smlaierstruct event_base *current_base = NULL;
117171169Smlaier
118171169Smlaier/* Handle signals - This is a deprecated interface */
119171169Smlaierint (*event_sigcb)(void);		/* Signal callback when gotsig is set */
120171169Smlaiervolatile sig_atomic_t event_gotsig;	/* Set in signal handler */
121171169Smlaier
122171169Smlaier/* Prototypes */
123171169Smlaierstatic void	event_queue_insert(struct event_base *, struct event *, int);
124171169Smlaierstatic void	event_queue_remove(struct event_base *, struct event *, int);
125171169Smlaierstatic int	event_haveevents(struct event_base *);
126171169Smlaier
127171169Smlaierstatic void	event_process_active(struct event_base *);
128171169Smlaier
129171169Smlaierstatic int	timeout_next(struct event_base *, struct timeval *);
130171169Smlaierstatic void	timeout_process(struct event_base *);
131171169Smlaierstatic void	timeout_correct(struct event_base *, struct timeval *);
132171169Smlaier
133171169Smlaierstatic int
134171169Smlaiercompare(struct event *a, struct event *b)
135171169Smlaier{
136171169Smlaier	if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
137171169Smlaier		return (-1);
138171169Smlaier	else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
139171169Smlaier		return (1);
140171169Smlaier	if (a < b)
141171169Smlaier		return (-1);
142171169Smlaier	else if (a > b)
143171169Smlaier		return (1);
144171169Smlaier	return (0);
145171169Smlaier}
146171169Smlaier
/*
 * Fetch the current time into *tp, preferring a monotonic clock when
 * available so timeouts survive wall-clock adjustments.
 * Returns 0 on success, -1 if the clock could not be read.
 */
static int
gettime(struct timeval *tp)
{
#ifdef HAVE_CLOCK_GETTIME
	struct timespec	ts;

#ifdef HAVE_CLOCK_MONOTONIC
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
#else
	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
#endif
		return (-1);
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;
#else
	/* Propagate failure instead of silently reporting success. */
	if (gettimeofday(tp, NULL) == -1)
		return (-1);
#endif

	return (0);
}
167171169Smlaier
/* Red-black tree of pending timeouts, keyed by compare() above. */
RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);

RB_GENERATE(event_tree, event, ev_timeout_node, compare);
171171169Smlaier
172171169Smlaier
173171169Smlaiervoid *
174171169Smlaierevent_init(void)
175171169Smlaier{
176171169Smlaier	int i;
177171169Smlaier
178171169Smlaier	if ((current_base = calloc(1, sizeof(struct event_base))) == NULL)
179171169Smlaier		event_err(1, "%s: calloc");
180171169Smlaier
181171169Smlaier	event_sigcb = NULL;
182171169Smlaier	event_gotsig = 0;
183171169Smlaier	gettime(&current_base->event_tv);
184171169Smlaier
185171169Smlaier	RB_INIT(&current_base->timetree);
186171169Smlaier	TAILQ_INIT(&current_base->eventqueue);
187171169Smlaier	TAILQ_INIT(&signalqueue);
188171169Smlaier
189171169Smlaier	current_base->evbase = NULL;
190171169Smlaier	for (i = 0; eventops[i] && !current_base->evbase; i++) {
191171169Smlaier		current_base->evsel = eventops[i];
192171169Smlaier
193171169Smlaier		current_base->evbase = current_base->evsel->init();
194171169Smlaier	}
195171169Smlaier
196171169Smlaier	if (current_base->evbase == NULL)
197171169Smlaier		event_errx(1, "%s: no event mechanism available", __func__);
198171169Smlaier
199171169Smlaier	if (getenv("EVENT_SHOW_METHOD"))
200171169Smlaier		event_msgx("libevent using: %s\n",
201171169Smlaier			   current_base->evsel->name);
202171169Smlaier
203171169Smlaier	/* allocate a single active event queue */
204171169Smlaier	event_base_priority_init(current_base, 1);
205171169Smlaier
206171169Smlaier	return (current_base);
207171169Smlaier}
208171169Smlaier
/*
 * Release all memory owned by an event base.  Passing NULL frees the
 * global current_base.  The base must have no registered, active, or
 * pending-timeout events left; this is enforced with assertions.
 */
void
event_base_free(struct event_base *base)
{
	int i;

	/* NULL means "free the default base", if one exists. */
	if (base == NULL && current_base)
		base = current_base;
	/* Clear the global pointer so it cannot dangle after free. */
        if (base == current_base)
		current_base = NULL;

	assert(base);
	assert(TAILQ_EMPTY(&base->eventqueue));
	for (i=0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(RB_EMPTY(&base->timetree));

	/* Free each per-priority active queue, then the queue array. */
	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	/* Let the backend (select/poll/epoll/...) tear down its state. */
	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base->evbase);

	free(base);
}
235171169Smlaier
236171169Smlaierint
237171169Smlaierevent_priority_init(int npriorities)
238171169Smlaier{
239171169Smlaier  return event_base_priority_init(current_base, npriorities);
240171169Smlaier}
241171169Smlaier
242171169Smlaierint
243171169Smlaierevent_base_priority_init(struct event_base *base, int npriorities)
244171169Smlaier{
245171169Smlaier	int i;
246171169Smlaier
247171169Smlaier	if (base->event_count_active)
248171169Smlaier		return (-1);
249171169Smlaier
250171169Smlaier	if (base->nactivequeues && npriorities != base->nactivequeues) {
251171169Smlaier		for (i = 0; i < base->nactivequeues; ++i) {
252171169Smlaier			free(base->activequeues[i]);
253171169Smlaier		}
254171169Smlaier		free(base->activequeues);
255171169Smlaier	}
256171169Smlaier
257171169Smlaier	/* Allocate our priority queues */
258171169Smlaier	base->nactivequeues = npriorities;
259171169Smlaier	base->activequeues = (struct event_list **)calloc(base->nactivequeues,
260171169Smlaier	    npriorities * sizeof(struct event_list *));
261171169Smlaier	if (base->activequeues == NULL)
262171169Smlaier		event_err(1, "%s: calloc", __func__);
263171169Smlaier
264171169Smlaier	for (i = 0; i < base->nactivequeues; ++i) {
265171169Smlaier		base->activequeues[i] = malloc(sizeof(struct event_list));
266171169Smlaier		if (base->activequeues[i] == NULL)
267171169Smlaier			event_err(1, "%s: malloc", __func__);
268171169Smlaier		TAILQ_INIT(base->activequeues[i]);
269171169Smlaier	}
270171169Smlaier
271171169Smlaier	return (0);
272171169Smlaier}
273171169Smlaier
274171169Smlaierint
275171169Smlaierevent_haveevents(struct event_base *base)
276171169Smlaier{
277171169Smlaier	return (base->event_count > 0);
278171169Smlaier}
279171169Smlaier
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */
285171169Smlaier
/*
 * Run the callbacks of every event on the highest-priority non-empty
 * active queue.  Queue 0 is served first, so low-numbered priorities
 * can starve higher-numbered ones.
 */
static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	if (!base->event_count_active)
		return;

	/* Find the first (most urgent) queue with pending events. */
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		event_queue_remove(base, ev, EVLIST_ACTIVE);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		/* ev_pncalls points at our local counter so event_del()
		 * (or event_add()) can zero it and abort the loop below
		 * while the callback is running. */
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			/* Bail out so the deprecated signal callback can
			 * run from the main loop. */
			if (event_gotsig)
				return;
		}
	}
}
321171169Smlaier
/*
 * Wait continuously for events.  We exit only if no events are left.
 */
325171169Smlaier
326171169Smlaierint
327171169Smlaierevent_dispatch(void)
328171169Smlaier{
329171169Smlaier	return (event_loop(0));
330171169Smlaier}
331171169Smlaier
/* Run the given base until no events remain. */
int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}
337171169Smlaier
338171169Smlaierstatic void
339171169Smlaierevent_loopexit_cb(int fd, short what, void *arg)
340171169Smlaier{
341171169Smlaier	struct event_base *base = arg;
342171169Smlaier	base->event_gotterm = 1;
343171169Smlaier}
344171169Smlaier
345171169Smlaier/* not thread safe */
346171169Smlaier
347171169Smlaierint
348171169Smlaierevent_loopexit(struct timeval *tv)
349171169Smlaier{
350171169Smlaier	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
351171169Smlaier		    current_base, tv));
352171169Smlaier}
353171169Smlaier
354171169Smlaierint
355171169Smlaierevent_base_loopexit(struct event_base *event_base, struct timeval *tv)
356171169Smlaier{
357171169Smlaier	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
358171169Smlaier		    event_base, tv));
359171169Smlaier}
360171169Smlaier
361171169Smlaier/* not thread safe */
362171169Smlaier
363171169Smlaierint
364171169Smlaierevent_loop(int flags)
365171169Smlaier{
366171169Smlaier	return event_base_loop(current_base, flags);
367171169Smlaier}
368171169Smlaier
/*
 * Core dispatch loop: repeatedly wait for I/O, signal, and timer events
 * on `base` and run their callbacks.  `flags` may contain EVLOOP_ONCE
 * (return after one round of callbacks) and/or EVLOOP_NONBLOCK (poll
 * without sleeping).
 * Returns 0 on normal termination, 1 when no events are registered,
 * -1 on a backend or signal-callback error.
 */
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	int res, done;

	done = 0;
	while (!done) {
		/* Calculate the initial events that we are waiting for */
		if (evsel->recalc(base, evbase, 0) == -1)
			return (-1);

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		/* Check if time is running backwards */
		gettime(&tv);
		if (timercmp(&tv, &base->event_tv, <)) {
			struct timeval off;
			event_debug(("%s: time is running backwards, corrected",
				    __func__));
			/* Shift every pending timeout by the jump size. */
			timersub(&base->event_tv, &tv, &off);
			timeout_correct(base, &off);
		}
		base->event_tv = tv;

		/* Sleep until the earliest timeout unless callbacks are
		 * already pending or the caller asked not to block. */
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK))
			timeout_next(base, &tv);
		else
			timerclear(&tv);

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		res = evsel->dispatch(base, evbase, &tv);

		if (res == -1)
			return (-1);

		/* Move expired timers onto the active queues. */
		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
441171169Smlaier
442171169Smlaier/* Sets up an event for processing once */
443171169Smlaier
/* Heap-allocated wrapper pairing a one-shot event with its user
 * callback; freed by event_once_cb() after the callback fires. */
struct event_once {
	struct event ev;		/* the embedded one-shot event */

	void (*cb)(int, short, void *);	/* user callback */
	void *arg;			/* argument passed to cb */
};
450171169Smlaier
451171169Smlaier/* One-time callback, it deletes itself */
452171169Smlaier
453171169Smlaierstatic void
454171169Smlaierevent_once_cb(int fd, short events, void *arg)
455171169Smlaier{
456171169Smlaier	struct event_once *eonce = arg;
457171169Smlaier
458171169Smlaier	(*eonce->cb)(fd, events, eonce->arg);
459171169Smlaier	free(eonce);
460171169Smlaier}
461171169Smlaier
462171169Smlaier/* Schedules an event once */
463171169Smlaier
464171169Smlaierint
465171169Smlaierevent_once(int fd, short events,
466171169Smlaier    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
467171169Smlaier{
468171169Smlaier	struct event_once *eonce;
469171169Smlaier	struct timeval etv;
470171169Smlaier	int res;
471171169Smlaier
472171169Smlaier	/* We cannot support signals that just fire once */
473171169Smlaier	if (events & EV_SIGNAL)
474171169Smlaier		return (-1);
475171169Smlaier
476171169Smlaier	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
477171169Smlaier		return (-1);
478171169Smlaier
479171169Smlaier	eonce->cb = callback;
480171169Smlaier	eonce->arg = arg;
481171169Smlaier
482171169Smlaier	if (events == EV_TIMEOUT) {
483171169Smlaier		if (tv == NULL) {
484171169Smlaier			timerclear(&etv);
485171169Smlaier			tv = &etv;
486171169Smlaier		}
487171169Smlaier
488171169Smlaier		evtimer_set(&eonce->ev, event_once_cb, eonce);
489171169Smlaier	} else if (events & (EV_READ|EV_WRITE)) {
490171169Smlaier		events &= EV_READ|EV_WRITE;
491171169Smlaier
492171169Smlaier		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
493171169Smlaier	} else {
494171169Smlaier		/* Bad event combination */
495171169Smlaier		free(eonce);
496171169Smlaier		return (-1);
497171169Smlaier	}
498171169Smlaier
499171169Smlaier	res = event_add(&eonce->ev, tv);
500171169Smlaier	if (res != 0) {
501171169Smlaier		free(eonce);
502171169Smlaier		return (res);
503171169Smlaier	}
504171169Smlaier
505171169Smlaier	return (0);
506171169Smlaier}
507171169Smlaier
508171169Smlaiervoid
509171169Smlaierevent_set(struct event *ev, int fd, short events,
510171169Smlaier	  void (*callback)(int, short, void *), void *arg)
511171169Smlaier{
512171169Smlaier	/* Take the current base - caller needs to set the real base later */
513171169Smlaier	ev->ev_base = current_base;
514171169Smlaier
515171169Smlaier	ev->ev_callback = callback;
516171169Smlaier	ev->ev_arg = arg;
517171169Smlaier	ev->ev_fd = fd;
518171169Smlaier	ev->ev_events = events;
519171169Smlaier	ev->ev_flags = EVLIST_INIT;
520171169Smlaier	ev->ev_ncalls = 0;
521171169Smlaier	ev->ev_pncalls = NULL;
522171169Smlaier
523171169Smlaier	/* by default, we put new events into the middle priority */
524171169Smlaier	ev->ev_pri = current_base->nactivequeues/2;
525171169Smlaier}
526171169Smlaier
527171169Smlaierint
528171169Smlaierevent_base_set(struct event_base *base, struct event *ev)
529171169Smlaier{
530171169Smlaier	/* Only innocent events may be assigned to a different base */
531171169Smlaier	if (ev->ev_flags != EVLIST_INIT)
532171169Smlaier		return (-1);
533171169Smlaier
534171169Smlaier	ev->ev_base = base;
535171169Smlaier	ev->ev_pri = base->nactivequeues/2;
536171169Smlaier
537171169Smlaier	return (0);
538171169Smlaier}
539171169Smlaier
/*
 * Sets the priority of an event - if an event is already scheduled
 * changing the priority is going to fail.
 */
544171169Smlaier
545171169Smlaierint
546171169Smlaierevent_priority_set(struct event *ev, int pri)
547171169Smlaier{
548171169Smlaier	if (ev->ev_flags & EVLIST_ACTIVE)
549171169Smlaier		return (-1);
550171169Smlaier	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
551171169Smlaier		return (-1);
552171169Smlaier
553171169Smlaier	ev->ev_pri = pri;
554171169Smlaier
555171169Smlaier	return (0);
556171169Smlaier}
557171169Smlaier
558171169Smlaier/*
559171169Smlaier * Checks if a specific event is pending or scheduled.
560171169Smlaier */
561171169Smlaier
/*
 * Report which of the requested `event` flags are currently pending
 * for ev.  If EV_TIMEOUT is pending and tv is non-NULL, *tv receives
 * the expiry time expressed on the wall clock.
 */
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;
	if (ev->ev_flags & EVLIST_SIGNAL)
		flags |= EV_SIGNAL;

	/* Only these bits are meaningful to callers. */
	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		/* ev_timeout is on gettime()'s clock (possibly
		 * monotonic); compute the remaining interval there,
		 * then rebase it onto the gettimeofday() wall clock. */
		gettime(&now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}
590171169Smlaier
591171169Smlaierint
592171169Smlaierevent_add(struct event *ev, struct timeval *tv)
593171169Smlaier{
594171169Smlaier	struct event_base *base = ev->ev_base;
595171169Smlaier	const struct eventop *evsel = base->evsel;
596171169Smlaier	void *evbase = base->evbase;
597171169Smlaier
598171169Smlaier	event_debug((
599171169Smlaier		 "event_add: event: %p, %s%s%scall %p",
600171169Smlaier		 ev,
601171169Smlaier		 ev->ev_events & EV_READ ? "EV_READ " : " ",
602171169Smlaier		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
603171169Smlaier		 tv ? "EV_TIMEOUT " : " ",
604171169Smlaier		 ev->ev_callback));
605171169Smlaier
606171169Smlaier	assert(!(ev->ev_flags & ~EVLIST_ALL));
607171169Smlaier
608171169Smlaier	if (tv != NULL) {
609171169Smlaier		struct timeval now;
610171169Smlaier
611171169Smlaier		if (ev->ev_flags & EVLIST_TIMEOUT)
612171169Smlaier			event_queue_remove(base, ev, EVLIST_TIMEOUT);
613171169Smlaier
614171169Smlaier		/* Check if it is active due to a timeout.  Rescheduling
615171169Smlaier		 * this timeout before the callback can be executed
616171169Smlaier		 * removes it from the active list. */
617171169Smlaier		if ((ev->ev_flags & EVLIST_ACTIVE) &&
618171169Smlaier		    (ev->ev_res & EV_TIMEOUT)) {
619171169Smlaier			/* See if we are just active executing this
620171169Smlaier			 * event in a loop
621171169Smlaier			 */
622171169Smlaier			if (ev->ev_ncalls && ev->ev_pncalls) {
623171169Smlaier				/* Abort loop */
624171169Smlaier				*ev->ev_pncalls = 0;
625171169Smlaier			}
626171169Smlaier
627171169Smlaier			event_queue_remove(base, ev, EVLIST_ACTIVE);
628171169Smlaier		}
629171169Smlaier
630171169Smlaier		gettime(&now);
631171169Smlaier		timeradd(&now, tv, &ev->ev_timeout);
632171169Smlaier
633171169Smlaier		event_debug((
634171169Smlaier			 "event_add: timeout in %d seconds, call %p",
635171169Smlaier			 tv->tv_sec, ev->ev_callback));
636171169Smlaier
637171169Smlaier		event_queue_insert(base, ev, EVLIST_TIMEOUT);
638171169Smlaier	}
639171169Smlaier
640171169Smlaier	if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
641171169Smlaier	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
642171169Smlaier		event_queue_insert(base, ev, EVLIST_INSERTED);
643171169Smlaier
644171169Smlaier		return (evsel->add(evbase, ev));
645171169Smlaier	} else if ((ev->ev_events & EV_SIGNAL) &&
646171169Smlaier	    !(ev->ev_flags & EVLIST_SIGNAL)) {
647171169Smlaier		event_queue_insert(base, ev, EVLIST_SIGNAL);
648171169Smlaier
649171169Smlaier		return (evsel->add(evbase, ev));
650171169Smlaier	}
651171169Smlaier
652171169Smlaier	return (0);
653171169Smlaier}
654171169Smlaier
/*
 * Remove an event from every queue it is on and, for I/O and signal
 * events, tell the backend to stop watching it.
 * Returns 0 on success, -1 if the event was never attached to a base.
 */
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	/* Only I/O and signal events need the backend told. */
	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	} else if (ev->ev_flags & EVLIST_SIGNAL) {
		event_queue_remove(base, ev, EVLIST_SIGNAL);
		return (evsel->del(evbase, ev));
	}

	return (0);
}
697171169Smlaier
698171169Smlaiervoid
699171169Smlaierevent_active(struct event *ev, int res, short ncalls)
700171169Smlaier{
701171169Smlaier	/* We get different kinds of events, add them together */
702171169Smlaier	if (ev->ev_flags & EVLIST_ACTIVE) {
703171169Smlaier		ev->ev_res |= res;
704171169Smlaier		return;
705171169Smlaier	}
706171169Smlaier
707171169Smlaier	ev->ev_res = res;
708171169Smlaier	ev->ev_ncalls = ncalls;
709171169Smlaier	ev->ev_pncalls = NULL;
710171169Smlaier	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
711171169Smlaier}
712171169Smlaier
713171169Smlaierint
714171169Smlaiertimeout_next(struct event_base *base, struct timeval *tv)
715171169Smlaier{
716171169Smlaier	struct timeval dflt = TIMEOUT_DEFAULT;
717171169Smlaier
718171169Smlaier	struct timeval now;
719171169Smlaier	struct event *ev;
720171169Smlaier
721171169Smlaier	if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
722171169Smlaier		*tv = dflt;
723171169Smlaier		return (0);
724171169Smlaier	}
725171169Smlaier
726171169Smlaier	if (gettime(&now) == -1)
727171169Smlaier		return (-1);
728171169Smlaier
729171169Smlaier	if (timercmp(&ev->ev_timeout, &now, <=)) {
730171169Smlaier		timerclear(tv);
731171169Smlaier		return (0);
732171169Smlaier	}
733171169Smlaier
734171169Smlaier	timersub(&ev->ev_timeout, &now, tv);
735171169Smlaier
736171169Smlaier	assert(tv->tv_sec >= 0);
737171169Smlaier	assert(tv->tv_usec >= 0);
738171169Smlaier
739171169Smlaier	event_debug(("timeout_next: in %d seconds", tv->tv_sec));
740171169Smlaier	return (0);
741171169Smlaier}
742171169Smlaier
/*
 * Subtract `off` from every pending timeout.  Called when the clock is
 * detected to have jumped backwards, so relative deadlines stay valid.
 */
static void
timeout_correct(struct event_base *base, struct timeval *off)
{
	struct event *ev;

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	RB_FOREACH(ev, event_tree, &base->timetree)
		timersub(&ev->ev_timeout, off, &ev->ev_timeout);
}
755171169Smlaier
/*
 * Dequeue every timer whose deadline has passed and mark it active
 * with EV_TIMEOUT so its callback runs on the next processing pass.
 */
void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev, *next;

	gettime(&now);

	for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
		/* The tree is ordered by deadline; stop at the first
		 * event that has not yet expired. */
		if (timercmp(&ev->ev_timeout, &now, >))
			break;
		/* Fetch the successor before unlinking ev. */
		next = RB_NEXT(event_tree, &base->timetree, ev);

		event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}
779171169Smlaier
/*
 * Unlink `ev` from the queue identified by `queue` (one EVLIST_* flag)
 * and update the event counters.  Aborts the process if the event is
 * not actually on that queue.
 */
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	/* Internal events are excluded from the public counters. */
	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_REMOVE(&signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT:
		RB_REMOVE(event_tree, &base->timetree, ev);
		break;
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
816171169Smlaier
/*
 * Link `ev` onto the queue identified by `queue` (one EVLIST_* flag)
 * and update the event counters.  Re-inserting on the active queue is
 * a no-op; double insertion elsewhere aborts the process.
 */
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);
	}

	/* Internal events are excluded from the public counters. */
	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev,ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_INSERT_TAIL(&signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT: {
		/* RB_INSERT returns NULL on success; compare() makes
		 * keys unique via the pointer tie-break. */
		struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
		assert(tmp == NULL);
		break;
	}
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
860171169Smlaier
861171169Smlaier/* Functions for debugging */
862171169Smlaier
/* Return the compile-time libevent version string. */
const char *
event_get_version(void)
{
	return (VERSION);
}
868171169Smlaier
869171169Smlaier/*
870171169Smlaier * No thread-safe interface needed - the information should be the same
871171169Smlaier * for all threads.
872171169Smlaier */
873171169Smlaier
/* Return the name of the kernel notification backend chosen at init. */
const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
879