/*	$NetBSD: events.c,v 1.1.1.4 2012/06/09 11:27:25 tron Exp $	*/

/*++
/* NAME
/*	events 3
/* SUMMARY
/*	event manager
/* SYNOPSIS
/*	#include <events.h>
/*
/*	time_t	event_time()
/*
/*	void	event_loop(delay)
/*	int	delay;
/*
/*	time_t	event_request_timer(callback, context, delay)
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*	int	delay;
/*
/*	int	event_cancel_timer(callback, context)
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_enable_read(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_enable_write(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_disable_readwrite(fd)
/*	int	fd;
/*
/*	void	event_drain(time_limit)
/*	int	time_limit;
/*
/*	void	event_fork(void)
/* DESCRIPTION
/*	This module delivers I/O and timer events.
/*	Multiple I/O streams and timers can be monitored simultaneously.
/*	Events are delivered via callback routines provided by the
/*	application. When requesting an event, the application can provide
/*	private context that is passed back when the callback routine is
/*	executed.
/*
/*	event_time() returns a cached value of the current time.
/*
/*	event_loop() monitors all I/O channels for which the application has
/*	expressed interest, and monitors the timer request queue.
/*	It notifies the application whenever events of interest happen.
/*	A negative delay value causes the function to pause until something
/*	happens; a positive delay value causes event_loop() to return when
/*	the next event happens or when the delay time in seconds is over,
/*	whichever happens first. A zero delay effectuates a poll.
/*
/*	Note: in order to avoid race conditions, event_loop() must
/*	not be called recursively.
/*
/*	event_request_timer() causes the specified callback function to
/*	be called with the specified context argument after \fIdelay\fR
/*	seconds, or as soon as possible thereafter. The delay should
/*	not be negative (the manifest EVENT_NULL_DELAY provides for
/*	convenient zero-delay notification).
/*	The event argument is equal to EVENT_TIME.
/*	Only one timer request can be active per (callback, context) pair.
/*	Calling event_request_timer() with an existing (callback, context)
/*	pair does not schedule a new event, but updates the time of event
/*	delivery. The result is the absolute time at which the timer is
/*	scheduled to go off.
/*
/*	event_cancel_timer() cancels the specified (callback, context) request.
/*	The application is allowed to cancel non-existing requests. The result
/*	value is the amount of time left before the timer would have gone off,
/*	or -1 in case of no pending timer.
/*
/*	event_enable_read() (event_enable_write()) enables read (write) events
/*	on the named I/O channel. It is up to the application to assemble
/*	partial reads or writes.
/*	An I/O channel cannot handle more than one request at the
/*	same time. The application is allowed to enable an event that
/*	is already enabled (same channel, same read or write operation,
/*	but perhaps a different callback or context). On systems with
/*	kernel-based event filters this is preferred usage, because
/*	each disable and enable request would cost a system call.
/*
/*	The manifest constants EVENT_NULL_CONTEXT and EVENT_NULL_TYPE
/*	provide convenient null values.
/*
/*	The callback routine has the following arguments:
/* .IP fd
/*	The stream on which the event happened.
/* .IP event
/*	An indication of the event type:
/* .RS
/* .IP EVENT_READ
/*	read event,
/* .IP EVENT_WRITE
/*	write event,
/* .IP EVENT_XCPT
/*	exception (actually, any event other than read or write).
/* .RE
/* .IP context
/*	Application context given to event_enable_read() (event_enable_write()).
/* .PP
/*	event_disable_readwrite() disables further I/O events on the specified
/*	I/O channel. The application is allowed to cancel non-existing
/*	I/O event requests.
/*
/*	event_drain() repeatedly calls event_loop() until no more timer
/*	events or I/O events are pending or until the time limit is reached.
/*	This routine must not be called from an event_whatever() callback
/*	routine. Note: this function assumes that no new I/O events
/*	will be registered.
/*
/*	event_fork() must be called by a child process after it is
/*	created with fork(), to re-initialize event processing.
/* DIAGNOSTICS
/*	Panics: interface violations. Fatal errors: out of memory,
/*	system call failure. Warnings: the number of available
/*	file descriptors is much less than FD_SETSIZE.
/* BUGS
/*	This module is based on event selection. It assumes that the
/*	event_loop() routine is called frequently. This approach is
/*	not suitable for applications with compute-bound loops that
/*	take a significant amount of time.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/
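
 /*
  * Illustrative usage sketch (ours, not part of the original module). The
  * EVENTS_EXAMPLES guard and the greet()/on_input() callbacks are
  * hypothetical; compile with -DEVENTS_EXAMPLES to build it. It shows the
  * timer reset semantics described above and a basic read request driven
  * by event_loop(), written as an application would write it.
  */
#ifdef EVENTS_EXAMPLES
#include <events.h>

static void greet(int event, char *context)
{
    /* Called with event == EVENT_TIME and the context given at request time. */
}

static void on_input(int event, char *context)
{
    /* Called with EVENT_READ, EVENT_WRITE or EVENT_XCPT. */
}

static void example(int fd)
{
    /* A second request for the same (callback, context) pair does not
     * schedule a second event; it only moves the delivery time. */
    event_request_timer(greet, (char *) 0, 5);
    event_request_timer(greet, (char *) 0, 10);	/* now fires after 10s */
    (void) event_cancel_timer(greet, (char *) 0);	/* forget it again */

    /* Watch fd for readability, then run the event machine. */
    event_enable_read(fd, on_input, (char *) 0);
    for (;;)
	event_loop(-1);				/* block until something happens */
}
#endif					/* EVENTS_EXAMPLES */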

/* System libraries. */

#include "sys_defs.h"
#include <sys/time.h>			/* XXX: 44BSD uses bzero() */
#include <time.h>
#include <errno.h>
#include <unistd.h>
#include <stddef.h>			/* offsetof() */
#include <string.h>			/* bzero() prototype for 44BSD */
#include <limits.h>			/* INT_MAX */

#ifdef USE_SYS_SELECT_H
#include <sys/select.h>
#endif

/* Application-specific. */

#include "mymalloc.h"
#include "msg.h"
#include "iostuff.h"
#include "ring.h"
#include "events.h"

#if !defined(EVENTS_STYLE)
#error "must define EVENTS_STYLE"
#endif

 /*
  * Traditional BSD-style select(2). Works everywhere, but has a built-in
  * upper bound on the number of file descriptors, and that limit is hard to
  * change on Linux. It is sometimes emulated with SYSV-style poll(2), which
  * doesn't have the file descriptor limit, but that unfortunately does not
  * improve the performance of servers with lots of connections.
  */
#define EVENT_ALLOC_INCR		10

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
typedef fd_set EVENT_MASK;

#define EVENT_MASK_BYTE_COUNT(mask)	sizeof(*(mask))
#define EVENT_MASK_ZERO(mask)		FD_ZERO(mask)
#define EVENT_MASK_SET(fd, mask)	FD_SET((fd), (mask))
#define EVENT_MASK_ISSET(fd, mask)	FD_ISSET((fd), (mask))
#define EVENT_MASK_CLR(fd, mask)	FD_CLR((fd), (mask))
#define EVENT_MASK_CMP(m1, m2) memcmp((m1), (m2), EVENT_MASK_BYTE_COUNT(m1))
#else

 /*
  * Kernel-based event filters (kqueue, /dev/poll, epoll). We use the
  * following file descriptor mask structure which is expanded on the fly.
  */
typedef struct {
    char   *data;			/* bit mask */
    size_t  data_len;			/* data byte count */
} EVENT_MASK;

 /* Bits per byte, byte in vector, bit offset in byte, bytes per set. */
#define EVENT_MASK_NBBY		(8)
#define EVENT_MASK_FD_BYTE(fd, mask) \
	(((unsigned char *) (mask)->data)[(fd) / EVENT_MASK_NBBY])
#define EVENT_MASK_FD_BIT(fd)	(1 << ((fd) % EVENT_MASK_NBBY))
#define EVENT_MASK_BYTES_NEEDED(len) \
	(((len) + (EVENT_MASK_NBBY - 1)) / EVENT_MASK_NBBY)
#define EVENT_MASK_BYTE_COUNT(mask)	((mask)->data_len)

 /* Memory management. */
#define EVENT_MASK_ALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	(mask)->data = mymalloc(_byte_len); \
	memset((mask)->data, 0, _byte_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_REALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	size_t _old_len = (mask)->data_len; \
	(mask)->data = myrealloc((mask)->data, _byte_len); \
	if (_byte_len > _old_len) \
	    memset((mask)->data + _old_len, 0, _byte_len - _old_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_FREE(mask)	myfree((mask)->data)

 /* Set operations, modeled after FD_ZERO/SET/ISSET/CLR. */
#define EVENT_MASK_ZERO(mask) \
	memset((mask)->data, 0, (mask)->data_len)
#define EVENT_MASK_SET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) |= EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_ISSET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) & EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_CLR(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) &= ~EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_CMP(m1, m2) \
	memcmp((m1)->data, (m2)->data, EVENT_MASK_BYTE_COUNT(m1))
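
 /*
  * Illustrative sketch (ours; EVENTS_EXAMPLES is a hypothetical guard):
  * the macros above store one bit per descriptor, so fd 10 lives in
  * data[1] (10 / 8) at bit 2 (10 % 8). This demo exercises the set
  * operations on a freshly allocated mask.
  */
#ifdef EVENTS_EXAMPLES
static void event_mask_demo(void)
{
    EVENT_MASK mask;

    EVENT_MASK_ALLOC(&mask, 64);	/* room for descriptors 0..63 */
    EVENT_MASK_SET(10, &mask);		/* sets data[1] bit 2 */
    if (!EVENT_MASK_ISSET(10, &mask))
	msg_panic("event_mask_demo: bit arithmetic is broken");
    EVENT_MASK_CLR(10, &mask);
    EVENT_MASK_FREE(&mask);
}
#endif					/* EVENTS_EXAMPLES */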
#endif

 /*
  * I/O events.
  */
typedef struct EVENT_FDTABLE EVENT_FDTABLE;

struct EVENT_FDTABLE {
    EVENT_NOTIFY_RDWR_FN callback;
    char   *context;
};
static EVENT_MASK event_rmask;		/* enabled read events */
static EVENT_MASK event_wmask;		/* enabled write events */
static EVENT_MASK event_xmask;		/* for bad news mostly */
static int event_fdlimit;		/* per-process open file limit */
static EVENT_FDTABLE *event_fdtable;	/* one slot per file descriptor */
static int event_fdslots;		/* number of file descriptor slots */
static int event_max_fd = -1;		/* highest fd number seen */

 /*
  * FreeBSD kqueue supports no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * FreeBSD kqueue does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * FreeBSD kqueue silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_KQUEUE)
#include <sys/event.h>

 /*
  * Some early FreeBSD implementations don't have the EV_SET macro.
  */
#ifndef EV_SET
#define EV_SET(kp, id, fi, fl, ffl, da, ud) do { \
        (kp)->ident = (id); \
        (kp)->filter = (fi); \
        (kp)->flags = (fl); \
        (kp)->fflags = (ffl); \
        (kp)->data = (da); \
        (kp)->udata = (ud); \
    } while (0)
#endif

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_kq;			/* handle to event filter */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_kq = kqueue(); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"kqueue"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_kq); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct kevent dummy; \
	EV_SET(&dummy, (fh), (ev), (op), 0, 0, 0); \
	(er) = kevent(event_kq, &dummy, 1, 0, 0, 0); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EV_ADD)
#define EVENT_REG_ADD_READ(e, f)   EVENT_REG_ADD_OP((e), (f), EVFILT_READ)
#define EVENT_REG_ADD_WRITE(e, f)  EVENT_REG_ADD_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_ADD_TEXT         "kevent EV_ADD"

#define EVENT_REG_DEL_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EV_DELETE)
#define EVENT_REG_DEL_READ(e, f)   EVENT_REG_DEL_OP((e), (f), EVFILT_READ)
#define EVENT_REG_DEL_WRITE(e, f)  EVENT_REG_DEL_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_DEL_TEXT         "kevent EV_DELETE"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct kevent EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct timespec ts; \
	struct timespec *tsp; \
	if ((delay) < 0) { \
	    tsp = 0; \
	} else { \
	    tsp = &ts; \
	    ts.tv_nsec = 0; \
	    ts.tv_sec = (delay); \
	} \
	(event_count) = kevent(event_kq, (struct kevent *) 0, 0, (event_buf), \
			  (buflen), (tsp)); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"kevent"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->ident)
#define EVENT_GET_TYPE(bp)	((bp)->filter)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) == EVFILT_READ)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) == EVFILT_WRITE)

#endif
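
 /*
  * Illustrative sketch (ours; EVENTS_EXAMPLES is a hypothetical guard):
  * the raw kqueue sequence that the macros above wrap. One kevent() call
  * registers a read filter; a second kevent() call waits for it to fire.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_KQUEUE) && defined(EVENTS_EXAMPLES)
static void kqueue_demo(int fd)
{
    int     kq = kqueue();
    struct kevent change;
    struct kevent result;
    struct timespec ts = {1, 0};	/* one-second timeout */
    int     n;

    if (kq < 0)
	msg_fatal("kqueue: %m");
    EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
    if (kevent(kq, &change, 1, (struct kevent *) 0, 0,
	       (struct timespec *) 0) < 0)
	msg_fatal("kevent EV_ADD: %m");
    n = kevent(kq, (struct kevent *) 0, 0, &result, 1, &ts);
    if (n > 0 && result.filter == EVFILT_READ)
	msg_info("kqueue_demo: fd %d is readable", (int) result.ident);
    (void) close(kq);
}
#endif					/* kqueue example */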

 /*
  * Solaris /dev/poll does not support application context, so we have to
  * maintain our own. This has the benefit of avoiding an expensive system
  * call just to change a call-back function or argument.
  *
  * Solaris /dev/poll does have a way to query if a specific descriptor is
  * registered. However, we maintain a descriptor mask anyway because a) it
  * avoids having to make an expensive system call to find out if something
  * is registered, b) some EVENTS_STYLE_MUMBLE implementations need a
  * descriptor bitmask anyway and c) we use the bitmask already to implement
  * sanity checks.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_DEVPOLL)
#include <sys/devpoll.h>
#include <fcntl.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_pollfd;		/* handle to file descriptor set */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_pollfd = open("/dev/poll", O_RDWR); \
	if (event_pollfd >= 0) close_on_exec(event_pollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"open /dev/poll"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_pollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev) do { \
	struct pollfd dummy; \
	dummy.fd = (fh); \
	dummy.events = (ev); \
	(er) = write(event_pollfd, (char *) &dummy, \
	    sizeof(dummy)) != sizeof(dummy) ? -1 : 0; \
    } while (0)

#define EVENT_REG_ADD_READ(e, f)  EVENT_REG_FD_OP((e), (f), POLLIN)
#define EVENT_REG_ADD_WRITE(e, f) EVENT_REG_FD_OP((e), (f), POLLOUT)
#define EVENT_REG_ADD_TEXT        "write /dev/poll"

#define EVENT_REG_DEL_BOTH(e, f)  EVENT_REG_FD_OP((e), (f), POLLREMOVE)
#define EVENT_REG_DEL_TEXT        "write /dev/poll"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct pollfd EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct dvpoll dvpoll; \
	dvpoll.dp_fds = (event_buf); \
	dvpoll.dp_nfds = (buflen); \
	dvpoll.dp_timeout = (delay) < 0 ? -1 : (delay) * 1000; \
	(event_count) = ioctl(event_pollfd, DP_POLL, &dvpoll); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"ioctl DP_POLL"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->fd)
#define EVENT_GET_TYPE(bp)	((bp)->revents)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & POLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & POLLOUT)

#endif
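
 /*
  * Illustrative sketch (ours; EVENTS_EXAMPLES is a hypothetical guard):
  * the raw /dev/poll protocol that the macros above wrap. Registration is
  * a write() of struct pollfd; harvesting is a DP_POLL ioctl.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_DEVPOLL) && defined(EVENTS_EXAMPLES)
static void devpoll_demo(int fd)
{
    int     dp = open("/dev/poll", O_RDWR);
    struct pollfd reg;
    struct pollfd result;
    struct dvpoll dvp;

    if (dp < 0)
	msg_fatal("open /dev/poll: %m");
    reg.fd = fd;
    reg.events = POLLIN;
    if (write(dp, (char *) &reg, sizeof(reg)) != sizeof(reg))
	msg_fatal("write /dev/poll: %m");
    dvp.dp_fds = &result;
    dvp.dp_nfds = 1;
    dvp.dp_timeout = 1000;		/* milliseconds */
    if (ioctl(dp, DP_POLL, &dvp) > 0 && (result.revents & POLLIN))
	msg_info("devpoll_demo: fd %d is readable", result.fd);
    (void) close(dp);
}
#endif					/* /dev/poll example */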

 /*
  * Linux epoll supports no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * Linux epoll does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * Linux epoll silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_EPOLL)
#include <sys/epoll.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_epollfd;		/* epoll handle */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_epollfd = epoll_create(n); \
	if (event_epollfd >= 0) close_on_exec(event_epollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"epoll_create"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_epollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct epoll_event dummy; \
	dummy.events = (ev); \
	dummy.data.fd = (fh); \
	(er) = epoll_ctl(event_epollfd, (op), (fh), &dummy); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_ADD)
#define EVENT_REG_ADD_READ(e, f)   EVENT_REG_ADD_OP((e), (f), EPOLLIN)
#define EVENT_REG_ADD_WRITE(e, f)  EVENT_REG_ADD_OP((e), (f), EPOLLOUT)
#define EVENT_REG_ADD_TEXT         "epoll_ctl EPOLL_CTL_ADD"

#define EVENT_REG_DEL_OP(e, f, ev) EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_DEL)
#define EVENT_REG_DEL_READ(e, f)   EVENT_REG_DEL_OP((e), (f), EPOLLIN)
#define EVENT_REG_DEL_WRITE(e, f)  EVENT_REG_DEL_OP((e), (f), EPOLLOUT)
#define EVENT_REG_DEL_TEXT         "epoll_ctl EPOLL_CTL_DEL"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct epoll_event EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	(event_count) = epoll_wait(event_epollfd, (event_buf), (buflen), \
				  (delay) < 0 ? -1 : (delay) * 1000); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"epoll_wait"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->data.fd)
#define EVENT_GET_TYPE(bp)	((bp)->events)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & EPOLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & EPOLLOUT)

#endif
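
 /*
  * Illustrative sketch (ours; EVENTS_EXAMPLES is a hypothetical guard):
  * the raw epoll sequence behind the macros above. epoll_ctl() registers
  * interest; epoll_wait() returns the descriptors that became ready.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_EPOLL) && defined(EVENTS_EXAMPLES)
static void epoll_demo(int fd)
{
    int     ep = epoll_create(1);	/* size hint, ignored by modern kernels */
    struct epoll_event ev;
    struct epoll_event result;
    int     n;

    if (ep < 0)
	msg_fatal("epoll_create: %m");
    ev.events = EPOLLIN;
    ev.data.fd = fd;
    if (epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev) < 0)
	msg_fatal("epoll_ctl EPOLL_CTL_ADD: %m");
    n = epoll_wait(ep, &result, 1, 1000);	/* one-second timeout */
    if (n > 0 && (result.events & EPOLLIN))
	msg_info("epoll_demo: fd %d is readable", result.data.fd);
    (void) close(ep);
}
#endif					/* epoll example */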

 /*
  * Timer events. Timer requests are kept sorted, in a circular list. We use
  * the RING abstraction, so we get to use a couple of ugly macros.
  *
  * When a call-back function adds a timer request, we label the request with
  * the event_loop() call instance that invoked the call-back. We use this to
  * prevent zero-delay timer requests from running in a tight loop and
  * starving I/O events.
  */
typedef struct EVENT_TIMER EVENT_TIMER;

struct EVENT_TIMER {
    time_t  when;			/* when event is wanted */
    EVENT_NOTIFY_TIME_FN callback;	/* callback function */
    char   *context;			/* callback context */
    long    loop_instance;		/* event_loop() call instance */
    RING    ring;			/* linkage */
};

static RING event_timer_head;		/* timer queue head */
static long event_loop_instance;	/* event_loop() call instance */

#define RING_TO_TIMER(r) \
	((EVENT_TIMER *) ((char *) (r) - offsetof(EVENT_TIMER, ring)))

#define FOREACH_QUEUE_ENTRY(entry, head) \
	for (entry = ring_succ(head); entry != (head); entry = ring_succ(entry))

#define FIRST_TIMER(head) \
	(ring_succ(head) != (head) ? RING_TO_TIMER(ring_succ(head)) : 0)
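
 /*
  * Illustrative sketch (ours; EVENTS_EXAMPLES is a hypothetical guard):
  * RING_TO_TIMER() is the classic "container of" trick. The ring member
  * sits at a fixed offset inside EVENT_TIMER, so subtracting that offset
  * from a RING pointer recovers the enclosing timer request.
  */
#ifdef EVENTS_EXAMPLES
static void ring_to_timer_demo(void)
{
    EVENT_TIMER timer;
    RING   *rp = &timer.ring;

    if (RING_TO_TIMER(rp) != &timer)
	msg_panic("ring_to_timer_demo: offsetof arithmetic is broken");
}
#endif					/* EVENTS_EXAMPLES */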

 /*
  * Other private data structures.
  */
static time_t event_present;		/* cached time of day */

#define EVENT_INIT_NEEDED()	(event_present == 0)

/* event_init - set up tables and such */

static void event_init(void)
{
    EVENT_FDTABLE *fdp;
    int     err;

    if (!EVENT_INIT_NEEDED())
	msg_panic("event_init: repeated call");

    /*
     * Initialize the file descriptor masks and the call-back table. Where
     * possible we extend these data structures on the fly. With select(2)
     * based implementations we can only handle FD_SETSIZE open files.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if ((event_fdlimit = open_limit(FD_SETSIZE)) < 0)
	msg_fatal("unable to determine open file limit");
#else
    if ((event_fdlimit = open_limit(INT_MAX)) < 0)
	msg_fatal("unable to determine open file limit");
#endif
    if (event_fdlimit < FD_SETSIZE / 2 && event_fdlimit < 256)
	msg_warn("could allocate space for only %d open files", event_fdlimit);
    event_fdslots = EVENT_ALLOC_INCR;
    event_fdtable = (EVENT_FDTABLE *)
	mymalloc(sizeof(EVENT_FDTABLE) * event_fdslots);
    for (fdp = event_fdtable; fdp < event_fdtable + event_fdslots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&event_rmask);
    EVENT_MASK_ZERO(&event_wmask);
    EVENT_MASK_ZERO(&event_xmask);
#else
    EVENT_MASK_ALLOC(&event_rmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_wmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_xmask, event_fdslots);

    /*
     * Initialize the kernel-based filter.
     */
    EVENT_REG_INIT_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);
#endif

    /*
     * Initialize timer stuff.
     */
    ring_init(&event_timer_head);
    (void) time(&event_present);

    /*
     * Avoid an infinite initialization loop.
     */
    if (EVENT_INIT_NEEDED())
	msg_panic("event_init: unable to initialize");
}

/* event_extend - make room for more descriptor slots */

static void event_extend(int fd)
{
    const char *myname = "event_extend";
    int     old_slots = event_fdslots;
    int     new_slots = (event_fdslots > fd / 2 ?
			 2 * old_slots : fd + EVENT_ALLOC_INCR);
    EVENT_FDTABLE *fdp;
    int     err;

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);
    event_fdtable = (EVENT_FDTABLE *)
	myrealloc((char *) event_fdtable, sizeof(EVENT_FDTABLE) * new_slots);
    event_fdslots = new_slots;
    for (fdp = event_fdtable + old_slots;
	 fdp < event_fdtable + new_slots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_REALLOC(&event_rmask, new_slots);
    EVENT_MASK_REALLOC(&event_wmask, new_slots);
    EVENT_MASK_REALLOC(&event_xmask, new_slots);
#endif
#ifdef EVENT_REG_UPD_HANDLE
    EVENT_REG_UPD_HANDLE(err, new_slots);
    if (err < 0)
	msg_fatal("%s: %s: %m", myname, EVENT_REG_UPD_TEXT);
#endif
}

/* event_time - look up cached time of day */

time_t  event_time(void)
{
    if (EVENT_INIT_NEEDED())
	event_init();

    return (event_present);
}

/* event_drain - loop until all pending events are done */

void    event_drain(int time_limit)
{
    EVENT_MASK zero_mask;
    time_t  max_time;

    if (EVENT_INIT_NEEDED())
	return;

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&zero_mask);
#else
    EVENT_MASK_ALLOC(&zero_mask, event_fdslots);
#endif
    (void) time(&event_present);
    max_time = event_present + time_limit;
    while (event_present < max_time
	   && (event_timer_head.pred != &event_timer_head
	       || EVENT_MASK_CMP(&zero_mask, &event_xmask) != 0)) {
	event_loop(1);
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	if (EVENT_MASK_BYTE_COUNT(&zero_mask)
	    != EVENT_MASK_BYTES_NEEDED(event_fdslots))
	    EVENT_MASK_REALLOC(&zero_mask, event_fdslots);
#endif
    }
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_FREE(&zero_mask);
#endif
}

/* event_fork - resume event processing after fork() */

void    event_fork(void)
{
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_FDTABLE *fdp;
    int     err;
    int     fd;

    /*
     * No event was ever registered, so there's nothing to be done.
     */
    if (EVENT_INIT_NEEDED())
	return;

    /*
     * Close the existing filter handle and open a new kernel-based filter.
     */
    EVENT_REG_FORK_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);

    /*
     * Populate the new kernel-based filter with events that were registered
     * in the parent process.
     */
    for (fd = 0; fd <= event_max_fd; fd++) {
	if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	    EVENT_MASK_CLR(fd, &event_wmask);
	    fdp = event_fdtable + fd;
	    event_enable_write(fd, fdp->callback, fdp->context);
	} else if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	    EVENT_MASK_CLR(fd, &event_rmask);
	    fdp = event_fdtable + fd;
	    event_enable_read(fd, fdp->callback, fdp->context);
	}
    }
#endif
}
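
 /*
  * Illustrative sketch (ours; EVENTS_EXAMPLES is a hypothetical guard):
  * a child process must call event_fork() before it touches the event
  * machinery, because a kernel-based filter handle is not usable across
  * fork().
  */
#ifdef EVENTS_EXAMPLES
static void fork_demo(void)
{
    switch (fork()) {
    case -1:
	msg_fatal("fork: %m");
    case 0:
	event_fork();			/* child: rebuild the kernel filter */
	event_loop(-1);			/* then resume event processing */
	_exit(0);
    default:
	break;				/* parent continues unchanged */
    }
}
#endif					/* EVENTS_EXAMPLES */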

/* event_enable_read - enable read events */

void    event_enable_read(int fd, EVENT_NOTIFY_RDWR_FN callback, char *context)
{
    const char *myname = "event_enable_read";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_read() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * wasteful to make system calls when we change only application
     * call-back information. It has a noticeable effect on smtp-source
     * performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_rmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}

/* event_enable_write - enable write events */

void    event_enable_write(int fd, EVENT_NOTIFY_RDWR_FN callback, char *context)
{
    const char *myname = "event_enable_write";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_write() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * incredibly wasteful to make unregister and register system calls when
     * we change only application call-back information. It has a noticeable
     * effect on smtp-source performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_wmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}

/* event_disable_readwrite - disable request for read or write events */

void    event_disable_readwrite(int fd)
{
    const char *myname = "event_disable_readwrite";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    /*
     * Don't complain when there is nothing to cancel. The request may have
     * been canceled from another thread.
     */
    if (fd >= event_fdslots)
	return;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
#ifdef EVENT_REG_DEL_BOTH
    /* XXX Can't seem to disable READ and WRITE events selectively. */
    if (EVENT_MASK_ISSET(fd, &event_rmask)
	|| EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_BOTH(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#else
    if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	EVENT_REG_DEL_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    } else if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#endif						/* EVENT_REG_DEL_BOTH */
#endif						/* != EVENTS_STYLE_SELECT */
    EVENT_MASK_CLR(fd, &event_xmask);
    EVENT_MASK_CLR(fd, &event_rmask);
    EVENT_MASK_CLR(fd, &event_wmask);
    fdp = event_fdtable + fd;
    fdp->callback = 0;
    fdp->context = 0;
}

/* event_request_timer - (re)set timer */

time_t  event_request_timer(EVENT_NOTIFY_TIME_FN callback, char *context, int delay)
{
    const char *myname = "event_request_timer";
    RING   *ring;
    EVENT_TIMER *timer;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (delay < 0)
	msg_panic("%s: invalid delay: %d", myname, delay);

    /*
     * Make sure we schedule this event at the right time.
     */
    time(&event_present);

    /*
     * See if they are resetting an existing timer request. If so, take the
     * request away from the timer queue so that it can be inserted at the
     * right place.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	timer = RING_TO_TIMER(ring);
	if (timer->callback == callback && timer->context == context) {
	    timer->when = event_present + delay;
	    timer->loop_instance = event_loop_instance;
	    ring_detach(ring);
	    if (msg_verbose > 2)
		msg_info("%s: reset 0x%lx 0x%lx %d", myname,
			 (long) callback, (long) context, delay);
	    break;
	}
    }

    /*
     * If not found, schedule a new timer request.
     */
    if (ring == &event_timer_head) {
	timer = (EVENT_TIMER *) mymalloc(sizeof(EVENT_TIMER));
	timer->when = event_present + delay;
	timer->callback = callback;
	timer->context = context;
	timer->loop_instance = event_loop_instance;
	if (msg_verbose > 2)
	    msg_info("%s: set 0x%lx 0x%lx %d", myname,
		     (long) callback, (long) context, delay);
    }

    /*
     * Timer requests are kept sorted to reduce lookup overhead in the event
     * loop.
     *
     * XXX Append the new request after existing requests for the same time
     * slot. The event_loop() routine depends on this to avoid starving I/O
     * events when a call-back function schedules a zero-delay timer request.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	if (timer->when < RING_TO_TIMER(ring)->when)
	    break;
    }
    ring_prepend(ring, &timer->ring);

    return (timer->when);
}

/* event_cancel_timer - cancel timer */

int     event_cancel_timer(EVENT_NOTIFY_TIME_FN callback, char *context)
{
    const char *myname = "event_cancel_timer";
    RING   *ring;
    EVENT_TIMER *timer;
    int     time_left = -1;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * See if they are canceling an existing timer request. Do not complain
     * when the request is not found. It might have been canceled from some
     * other thread.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	timer = RING_TO_TIMER(ring);
	if (timer->callback == callback && timer->context == context) {
	    if ((time_left = timer->when - event_present) < 0)
		time_left = 0;
	    ring_detach(ring);
	    myfree((char *) timer);
	    break;
	}
    }
    if (msg_verbose > 2)
	msg_info("%s: 0x%lx 0x%lx %d", myname,
		 (long) callback, (long) context, time_left);
    return (time_left);
}

/* event_loop - wait for the next event */

void    event_loop(int delay)
{
    const char *myname = "event_loop";
    static int nested;

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    fd_set  rmask;
    fd_set  wmask;
    fd_set  xmask;
    struct timeval tv;
    struct timeval *tvp;
    int     new_max_fd;

#else
    EVENT_BUFFER event_buf[100];
    EVENT_BUFFER *bp;

#endif
    int     event_count;
    EVENT_TIMER *timer;
    int     fd;
    EVENT_FDTABLE *fdp;
    int     select_delay;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * XXX Also print the select() masks?
     */
    if (msg_verbose > 2) {
	RING   *ring;

	FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	    timer = RING_TO_TIMER(ring);
	    msg_info("%s: time left %3d for 0x%lx 0x%lx", myname,
		     (int) (timer->when - event_present),
		     (long) timer->callback, (long) timer->context);
	}
    }

    /*
     * Find out when the next timer would go off. Timer requests are sorted.
     * If any timer is scheduled, adjust the delay appropriately.
     */
    if ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
	event_present = time((time_t *) 0);
	if ((select_delay = timer->when - event_present) < 0) {
	    select_delay = 0;
	} else if (delay >= 0 && select_delay > delay) {
	    select_delay = delay;
	}
    } else {
	select_delay = delay;
    }
    if (msg_verbose > 2)
	msg_info("event_loop: select_delay %d", select_delay);

    /*
     * Negative delay means: wait until something happens. Zero delay means:
     * poll. Positive delay means: wait at most this long.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if (select_delay < 0) {
	tvp = 0;
    } else {
	tvp = &tv;
	tv.tv_usec = 0;
	tv.tv_sec = select_delay;
    }

    /*
     * Pause until the next event happens. When select() has a problem, don't
     * go into a tight loop. Allow select() to be interrupted due to the
     * arrival of a signal.
     */
    rmask = event_rmask;
    wmask = event_wmask;
    xmask = event_xmask;

    event_count = select(event_max_fd + 1, &rmask, &wmask, &xmask, tvp);
    if (event_count < 0) {
	if (errno != EINTR)
	    msg_fatal("event_loop: select: %m");
	return;
    }
#else
    EVENT_BUFFER_READ(event_count, event_buf,
		      sizeof(event_buf) / sizeof(event_buf[0]),
		      select_delay);
    if (event_count < 0) {
	if (errno != EINTR)
	    msg_fatal("event_loop: " EVENT_BUFFER_READ_TEXT ": %m");
	return;
    }
#endif

    /*
     * Before entering the application call-back routines, make sure we
     * aren't being called from a call-back routine. Doing so would make us
     * vulnerable to all kinds of race conditions.
     */
    if (nested++ > 0)
	msg_panic("event_loop: recursive call");

    /*
     * Deliver timer events. Allow the application to add/delete timer queue
     * requests while it is being called back. Requests are sorted: we keep
     * running over the timer request queue from the start, and stop when we
     * reach the future or the list end. We also stop when we reach a timer
     * request that was added by a call-back that was invoked from this
     * event_loop() call instance, for reasons that are explained below.
     *
     * To avoid dangling pointer problems 1) we must remove a request from the
     * timer queue before delivering its event to the application and 2) we
     * must look up the next timer request *after* calling the application.
     * The latter complicates the handling of zero-delay timer requests that
     * are added by event_loop() call-back functions.
     *
     * XXX When a timer event call-back function adds a new timer request,
     * event_request_timer() labels the request with the event_loop() call
     * instance that invoked the timer event call-back. We use this instance
     * label here to prevent zero-delay timer requests from running in a
     * tight loop and starving I/O events. To make this solution work,
     * event_request_timer() appends a new request after existing requests
     * for the same time slot.
     */
    event_present = time((time_t *) 0);
    event_loop_instance += 1;

    while ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
	if (timer->when > event_present)
	    break;
	if (timer->loop_instance == event_loop_instance)
	    break;
	ring_detach(&timer->ring);		/* first this */
	if (msg_verbose > 2)
	    msg_info("%s: timer 0x%lx 0x%lx", myname,
		     (long) timer->callback, (long) timer->context);
	timer->callback(EVENT_TIME, timer->context);	/* then this */
	myfree((char *) timer);
    }

    /*
     * Deliver I/O events. Allow the application to cancel event requests
     * while it is being called back. To this end, we keep an eye on the
     * contents of event_xmask, so that we deliver only events that are still
     * wanted. We do not change the event request masks. It is up to the
     * application to determine when a read or write is complete.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if (event_count > 0) {
	for (new_max_fd = 0, fd = 0; fd <= event_max_fd; fd++) {
	    if (FD_ISSET(fd, &event_xmask)) {
		new_max_fd = fd;
		/* In case event_fdtable is updated. */
		fdp = event_fdtable + fd;
		if (FD_ISSET(fd, &xmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: exception fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_XCPT, fdp->context);
		} else if (FD_ISSET(fd, &wmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_WRITE, fdp->context);
		} else if (FD_ISSET(fd, &rmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_READ, fdp->context);
		}
	    }
	}
	event_max_fd = new_max_fd;
    }
#else
    for (bp = event_buf; bp < event_buf + event_count; bp++) {
	fd = EVENT_GET_FD(bp);
	if (fd < 0 || fd > event_max_fd)
	    msg_panic("%s: bad file descriptor: %d", myname, fd);
	if (EVENT_MASK_ISSET(fd, &event_xmask)) {
	    fdp = event_fdtable + fd;
	    if (EVENT_TEST_READ(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_READ, fdp->context);
	    } else if (EVENT_TEST_WRITE(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback,
			     (long) fdp->context);
		fdp->callback(EVENT_WRITE, fdp->context);
	    } else {
		if (msg_verbose > 2)
		    msg_info("%s: other fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_XCPT, fdp->context);
	    }
	}
    }
#endif
    nested--;
}

#ifdef TEST

 /*
  * Proof-of-concept test program for the event manager. Schedule a series of
  * events at one-second intervals and let them happen, while echoing any
  * lines read from stdin.
  */
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>

/* timer_event - display event */

static void timer_event(int unused_event, char *context)
{
    printf("%ld: %s\n", (long) event_present, context);
    fflush(stdout);
}

/* echo - echo text received on stdin */

static void echo(int unused_event, char *unused_context)
{
    char    buf[BUFSIZ];

    if (fgets(buf, sizeof(buf), stdin) == 0)
	exit(0);
    printf("Result: %s", buf);
}

/* request - request a bunch of timer events */

static void request(int unused_event, char *unused_context)
{
    event_request_timer(timer_event, "3 first", 3);
    event_request_timer(timer_event, "3 second", 3);
    event_request_timer(timer_event, "4 first", 4);
    event_request_timer(timer_event, "4 second", 4);
    event_request_timer(timer_event, "2 first", 2);
    event_request_timer(timer_event, "2 second", 2);
    event_request_timer(timer_event, "1 first", 1);
    event_request_timer(timer_event, "1 second", 1);
    event_request_timer(timer_event, "0 first", 0);
    event_request_timer(timer_event, "0 second", 0);
}

int     main(int argc, char **argv)
{
    if (argv[1])
	msg_verbose = atoi(argv[1]);
    event_request_timer(request, (char *) 0, 0);
    event_enable_read(fileno(stdin), echo, (char *) 0);
    event_drain(10);
    exit(0);
}

#endif