1 /* SPDX-License-Identifier: BSD-2-Clause */
3 * eloop - portable event based main loop.
4 * Copyright (c) 2006-2019 Roy Marples <roy@marples.name>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #if (defined(__unix__) || defined(unix)) && !defined(USG)
30 #include <sys/param.h>
43 /* config.h should define HAVE_KQUEUE, HAVE_EPOLL, etc. */
44 #if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H)
48 /* Attempt to autodetect kqueue or epoll.
49 * Failing that, fall back to pselect. */
50 #if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL) && !defined(HAVE_PSELECT) && \
51 !defined(HAVE_POLLTS) && !defined(HAVE_PPOLL)
53 /* Assume BSD has a working sys/queue.h and kqueue(2) interface. */
54 #define HAVE_SYS_QUEUE_H
57 #elif defined(__linux__) || defined(__sun)
58 /* Assume Linux and Solaris have a working epoll(3) interface. */
62 /* pselect(2) is a POSIX standard. */
68 /* pollts and ppoll require poll.
69 * pselect is wrapped in a pollts/ppoll style interface
70 * and as such require poll as well. */
71 #if defined(HAVE_PSELECT) || defined(HAVE_POLLTS) || defined(HAVE_PPOLL)
75 #if defined(HAVE_POLLTS)
77 #elif defined(HAVE_PPOLL)
80 #define POLLTS eloop_pollts
81 #define ELOOP_NEED_POLLTS
88 #define UNUSED(a) (void)((a))
92 #define __unused __attribute__((__unused__))
99 #define MSEC_PER_SEC 1000L
100 #define NSEC_PER_MSEC 1000000L
103 #if defined(HAVE_KQUEUE)
104 #include <sys/event.h>
107 /* udata is void * except on NetBSD.
108 * lengths are int except on NetBSD. */
109 #define UPTR(x) ((intptr_t)(x))
113 #define LENC(x) ((int)(x))
115 #elif defined(HAVE_EPOLL)
116 #include <sys/epoll.h>
117 #elif defined(HAVE_POLL)
118 #if defined(HAVE_PSELECT)
119 #include <sys/select.h>
125 #if defined(HAVE_KQUEUE)
126 #pragma message("Compiling eloop with kqueue(2) support.")
127 #elif defined(HAVE_EPOLL)
128 #pragma message("Compiling eloop with epoll(7) support.")
129 #elif defined(HAVE_PSELECT)
130 #pragma message("Compiling eloop with pselect(2) support.")
131 #elif defined(HAVE_PPOLL)
132 #pragma message("Compiling eloop with ppoll(2) support.")
133 #elif defined(HAVE_POLLTS)
134 #pragma message("Compiling eloop with pollts(2) support.")
136 #error Unknown select mechanism for eloop
140 /* Our structures require TAILQ macros, which really every libc should
141 * ship as they are useful beyond belief.
142 * Sadly some libc's don't have sys/queue.h and some that do don't have
143 * the TAILQ_FOREACH macro. For those that don't, the application using
144 * this implementation will need to ship a working queue.h somewhere.
145 * If we don't have sys/queue.h found in config.h, then
146 * allow QUEUE_H to override loading queue.h in the current directory. */
147 #ifndef TAILQ_FOREACH
148 #ifdef HAVE_SYS_QUEUE_H
149 #include <sys/queue.h>
150 #elif defined(QUEUE_H)
151 #define __QUEUE_HEADER(x) #x
152 #define _QUEUE_HEADER(x) __QUEUE_HEADER(x)
153 #include _QUEUE_HEADER(QUEUE_H)
/* Per-fd event registration: intrusive list linkage plus the read and
 * write callbacks.  NOTE(review): the fd member and the callback-arg
 * members are not visible in this view of the struct — confirm against
 * the full source. */
160 TAILQ_ENTRY(eloop_event) next;
162 void (*read_cb)(void *);
164 void (*write_cb)(void *);
/* A single armed timeout: list linkage, absolute expiry time on the
 * monotonic clock, and the callback to invoke. */
168 struct eloop_timeout {
169 TAILQ_ENTRY(eloop_timeout) next;
170 struct timespec when;
171 void (*callback)(void *);
/* Main loop state: active events plus a free pool for recycling,
 * an fd-indexed lookup table (event_fds), timeouts kept in
 * chronological order (see eloop_q_timeout_add_tv) with their own
 * free pool, the run-immediately timeout0 hook used by the signal
 * trampoline, and the user signal callback. */
178 TAILQ_HEAD (event_head, eloop_event) events;
179 struct event_head free_events;
181 struct eloop_event **event_fds;
183 TAILQ_HEAD (timeout_head, eloop_timeout) timeouts;
184 struct timeout_head free_timeouts;
186 void (*timeout0)(void *);
190 void (*signal_cb)(int, void *);
/* Backend-specific poll state follows (kqueue/epoll descriptor, or a
 * pollfd array for the poll-style backends). */
193 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
195 #elif defined(HAVE_POLL)
204 #ifdef HAVE_REALLOCARRAY
205 #define eloop_realloca reallocarray
207 /* Handy routine to check for potential overflow.
208 * reallocarray(3) and reallocarr(3) are not portable. */
209 #define SQRT_SIZE_MAX (((size_t)1) << (sizeof(size_t) * CHAR_BIT / 2))
/* Fallback reallocarray(3): realloc ptr to n * size bytes, failing
 * instead of silently wrapping when the multiplication would overflow.
 * The fast path skips the division when both operands are below
 * sqrt(SIZE_MAX).
 * NOTE(review): if size == 0 while n >= SQRT_SIZE_MAX, the guard
 * evaluates SIZE_MAX / size — division by zero, UB.  Confirm no caller
 * can pass size 0. */
211 eloop_realloca(void *ptr, size_t n, size_t size)
214 if ((n | size) >= SQRT_SIZE_MAX && n > SIZE_MAX / size) {
218 return realloc(ptr, n * size);
/* Rebuild the pollfd array (HAVE_POLL backends) to mirror the current
 * event list: one slot per registered event, requesting POLLIN/POLLOUT
 * — presumably gated on the event's read_cb/write_cb being set; the
 * guard lines are not visible here, confirm against the full source. */
224 eloop_event_setup_fds(struct eloop *eloop)
226 struct eloop_event *e;
230 TAILQ_FOREACH(e, &eloop->events, next) {
231 eloop->fds[i].fd = e->fd;
232 eloop->fds[i].events = 0;
/* Interested in readability. */
234 eloop->fds[i].events |= POLLIN;
/* Interested in writability. */
236 eloop->fds[i].events |= POLLOUT;
/* Clear stale results from the previous poll. */
237 eloop->fds[i].revents = 0;
242 #ifdef ELOOP_NEED_POLLTS
243 /* Wrapper around pselect, to imitate the NetBSD pollts call.
 * Returns pselect's result; only POLLIN/POLLOUT are honoured. */
245 eloop_pollts(struct pollfd * fds, nfds_t nfds,
246 const struct timespec *ts, const sigset_t *sigmask)
248 fd_set read_fds, write_fds;
/* Translate the pollfd interest set into fd_sets, tracking the
 * highest descriptor for pselect's nfds argument. */
255 for (n = 0; n < nfds; n++) {
256 if (fds[n].events & POLLIN) {
257 FD_SET(fds[n].fd, &read_fds);
258 if (fds[n].fd > maxfd)
261 if (fds[n].events & POLLOUT) {
262 FD_SET(fds[n].fd, &write_fds);
263 if (fds[n].fd > maxfd)
/* pselect installs sigmask atomically for the duration of the wait. */
268 r = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask);
/* Map the surviving fd_set members back into pollfd revents. */
270 for (n = 0; n < nfds; n++) {
272 FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
273 if (FD_ISSET(fds[n].fd, &write_fds))
274 fds[n].revents |= POLLOUT;
281 #else /* !HAVE_POLL */
282 #define eloop_event_setup_fds(a) {}
283 #endif /* HAVE_POLL */
/* Register (or update) read/write callbacks for fd.
 * At least one of read_cb/write_cb must be given (asserted below).
 * If the fd is already registered the callbacks are replaced in place
 * and the kernel filter set is adjusted; otherwise a new event is
 * taken from the free pool (or malloc'd), the fd lookup table and
 * pollfd array are grown as needed, and the event is registered with
 * the active backend.  Error paths return the event to the free pool. */
286 eloop_event_add_rw(struct eloop *eloop, int fd,
287 void (*read_cb)(void *), void *read_cb_arg,
288 void (*write_cb)(void *), void *write_cb_arg)
290 struct eloop_event *e;
291 #if defined(HAVE_KQUEUE)
293 #elif defined(HAVE_EPOLL)
294 struct epoll_event epe;
295 #elif defined(HAVE_POLL)
299 assert(eloop != NULL);
300 assert(read_cb != NULL || write_cb != NULL);
/* Pre-build the epoll interest set for either the MOD or ADD below. */
307 memset(&epe, 0, sizeof(epe));
309 epe.events = EPOLLIN;
311 epe.events |= EPOLLOUT;
314 /* We should only have one callback monitoring the fd. */
315 if (fd <= eloop->events_maxfd) {
316 if ((e = eloop->event_fds[fd]) != NULL) {
319 #if defined(HAVE_KQUEUE)
/* Re-arm the read filter; add or delete the write filter depending
 * on whether a write callback is being set or dropped. */
320 EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
323 EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
324 EV_ADD, 0, 0, UPTR(e));
325 else if (e->write_cb)
326 EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
327 EV_DELETE, 0, 0, UPTR(e));
328 error = kevent(eloop->poll_fd, ke,
329 e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
330 #elif defined(HAVE_EPOLL)
332 error = epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD,
/* Existing registration: just swap the callbacks and their args. */
338 e->read_cb = read_cb;
339 e->read_cb_arg = read_cb_arg;
342 e->write_cb = write_cb;
343 e->write_cb_arg = write_cb_arg;
345 eloop_event_setup_fds(eloop);
/* New fd beyond the lookup table: grow event_fds. */
349 struct eloop_event **new_fds;
352 /* Reserve ourself and 4 more. */
354 new_fds = eloop_realloca(eloop->event_fds,
355 ((size_t)maxfd + 1), sizeof(*eloop->event_fds));
359 /* set new entries NULL as the fd's may not be contiguous. */
360 for (i = maxfd; i > eloop->events_maxfd; i--)
363 eloop->event_fds = new_fds;
364 eloop->events_maxfd = maxfd;
367 /* Allocate a new event if no free ones already allocated. */
368 if ((e = TAILQ_FIRST(&eloop->free_events))) {
369 TAILQ_REMOVE(&eloop->free_events, e, next);
371 e = malloc(sizeof(*e));
376 /* Ensure we can actually listen to it. */
379 if (eloop->events_len > eloop->fds_len) {
380 nfds = eloop_realloca(eloop->fds,
381 (eloop->fds_len + 5), sizeof(*eloop->fds));
389 /* Now populate the structure and add it to the list. */
391 e->read_cb = read_cb;
392 e->read_cb_arg = read_cb_arg;
393 e->write_cb = write_cb;
394 e->write_cb_arg = write_cb_arg;
396 #if defined(HAVE_KQUEUE)
/* Arm a read filter always, a write filter only when requested. */
398 EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
399 EV_ADD, 0, 0, UPTR(e));
400 if (write_cb != NULL)
401 EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
402 EV_ADD, 0, 0, UPTR(e));
403 if (kevent(eloop->poll_fd, ke, write_cb ? 2 : 1, NULL, 0, NULL) == -1)
405 #elif defined(HAVE_EPOLL)
407 if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
411 TAILQ_INSERT_HEAD(&eloop->events, e, next);
412 eloop->event_fds[e->fd] = e;
413 eloop_event_setup_fds(eloop);
/* Error path: recycle the unused event rather than freeing it. */
419 TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
/* Convenience wrapper: register a read-only callback for fd. */
425 eloop_event_add(struct eloop *eloop, int fd,
426 void (*read_cb)(void *), void *read_cb_arg)
429 return eloop_event_add_rw(eloop, fd, read_cb, read_cb_arg, NULL, NULL);
/* Convenience wrapper: register a write-only callback for fd. */
433 eloop_event_add_w(struct eloop *eloop, int fd,
434 void (*write_cb)(void *), void *write_cb_arg)
437 return eloop_event_add_rw(eloop, fd, NULL,NULL, write_cb, write_cb_arg);
/* Remove an event for fd.  With write_only set (and a read callback
 * still present) only the write side is dropped: the write callback is
 * cleared and the kernel write filter removed, leaving the read side
 * armed.  Otherwise the whole event is unlinked from the active list,
 * removed from the fd lookup table, recycled onto the free pool, and
 * deregistered from the backend. */
441 eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
443 struct eloop_event *e;
444 #if defined(HAVE_KQUEUE)
446 #elif defined(HAVE_EPOLL)
447 struct epoll_event epe;
450 assert(eloop != NULL);
/* Nothing registered for this fd: nothing to delete. */
452 if (fd > eloop->events_maxfd ||
453 (e = eloop->event_fds[fd]) == NULL)
460 if (e->write_cb == NULL)
462 if (e->read_cb == NULL)
/* Drop just the write side. */
465 e->write_cb_arg = NULL;
466 #if defined(HAVE_KQUEUE)
467 EV_SET(&ke[0], (uintptr_t)e->fd,
468 EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
469 kevent(eloop->poll_fd, ke, 1, NULL, 0, NULL);
470 #elif defined(HAVE_EPOLL)
/* Downgrade the epoll registration to read-only interest. */
471 memset(&epe, 0, sizeof(epe));
474 epe.events = EPOLLIN;
475 epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD, fd, &epe);
477 eloop_event_setup_fds(eloop);
/* Full removal: unlink and recycle the event. */
482 TAILQ_REMOVE(&eloop->events, e, next);
483 eloop->event_fds[e->fd] = NULL;
484 TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
487 #if defined(HAVE_KQUEUE)
488 EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
489 EV_DELETE, 0, 0, UPTR(NULL));
491 EV_SET(&ke[1], (uintptr_t)fd,
492 EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
493 kevent(eloop->poll_fd, ke, e->write_cb ? 2 : 1, NULL, 0, NULL);
494 #elif defined(HAVE_EPOLL)
495 /* NULL event is safe because we
496 * rely on epoll_pwait which was added
497 * after the delete-without-event bug was fixed. */
498 epoll_ctl(eloop->poll_fd, EPOLL_CTL_DEL, fd, NULL);
501 eloop_event_setup_fds(eloop);
/* Arm (or re-arm) a timeout of *when (relative) on queue, firing
 * callback(arg).  The absolute expiry is computed against
 * CLOCK_MONOTONIC; an existing timeout with the same callback+arg is
 * replaced.  The timeout is inserted keeping the list in chronological
 * order so the loop only ever inspects the head. */
506 eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
507 const struct timespec *when, void (*callback)(void *), void *arg)
509 struct timespec now, w;
510 struct eloop_timeout *t, *tt = NULL;
512 assert(eloop != NULL);
513 assert(when != NULL);
514 assert(callback != NULL);
516 clock_gettime(CLOCK_MONOTONIC, &now);
517 timespecadd(&now, when, &w);
518 /* Check for time_t overflow. */
519 if (timespeccmp(&w, &now, <)) {
524 /* Remove existing timeout if present. */
525 TAILQ_FOREACH(t, &eloop->timeouts, next) {
526 if (t->callback == callback && t->arg == arg) {
527 TAILQ_REMOVE(&eloop->timeouts, t, next);
533 /* No existing, so allocate or grab one from the free pool. */
534 if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
535 TAILQ_REMOVE(&eloop->free_timeouts, t, next);
537 if ((t = malloc(sizeof(*t))) == NULL)
543 t->callback = callback;
547 /* The timeout list should be in chronological order,
 * so insert before the first entry that expires later. */
549 TAILQ_FOREACH(tt, &eloop->timeouts, next) {
550 if (timespeccmp(&t->when, &tt->when, <)) {
551 TAILQ_INSERT_BEFORE(tt, t, next);
/* Expires after everything else (or list empty): append. */
555 TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
/* Arm a timeout of `when` whole seconds (converted to a timespec,
 * conversion lines not visible here). */
560 eloop_q_timeout_add_sec(struct eloop *eloop, int queue, time_t when,
561 void (*callback)(void *), void *arg)
567 return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
/* Arm a timeout of `when` milliseconds: split into whole seconds and
 * the millisecond remainder scaled to nanoseconds. */
571 eloop_q_timeout_add_msec(struct eloop *eloop, int queue, long when,
572 void (*callback)(void *), void *arg)
576 tv.tv_sec = when / MSEC_PER_SEC;
577 tv.tv_nsec = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
578 return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
581 #if !defined(HAVE_KQUEUE)
/* Schedule callback(arg) to run at the very start of the next loop
 * iteration (used by the signal trampoline on non-kqueue backends).
 * Only a single slot exists — asserting it is empty means a second
 * pending "now" callback would abort in debug builds. */
583 eloop_timeout_add_now(struct eloop *eloop,
584 void (*callback)(void *), void *arg)
587 assert(eloop->timeout0 == NULL);
588 eloop->timeout0 = callback;
589 eloop->timeout0_arg = arg;
/* Cancel armed timeouts matching queue/callback/arg.
 * queue == 0 matches any queue and callback == NULL matches any
 * callback (wildcards); matches are moved to the free pool.
 * FOREACH_SAFE permits removal while iterating. */
595 eloop_q_timeout_delete(struct eloop *eloop, int queue,
596 void (*callback)(void *), void *arg)
598 struct eloop_timeout *t, *tt;
601 assert(eloop != NULL);
604 TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
605 if ((queue == 0 || t->queue == queue) &&
607 (!callback || t->callback == callback))
609 TAILQ_REMOVE(&eloop->timeouts, t, next);
610 TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
/* Request loop termination: record the exit code that
 * eloop_start() will eventually return. */
618 eloop_exit(struct eloop *eloop, int code)
621 assert(eloop != NULL);
623 eloop->exitcode = code;
627 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
/* (Re)create the backend polling descriptor with close-on-exec set,
 * storing it in eloop->poll_fd and returning it (-1 on failure).
 * kqueue1/epoll_create1 set O_CLOEXEC atomically; the plain kqueue()
 * path has to follow up with fcntl and closes the fd if that fails. */
629 eloop_open(struct eloop *eloop)
632 #if defined(HAVE_KQUEUE1)
633 return (eloop->poll_fd = kqueue1(O_CLOEXEC));
634 #elif defined(HAVE_KQUEUE)
637 if ((eloop->poll_fd = kqueue()) == -1)
639 if ((i = fcntl(eloop->poll_fd, F_GETFD, 0)) == -1 ||
640 fcntl(eloop->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
642 close(eloop->poll_fd);
646 return eloop->poll_fd;
647 #elif defined (HAVE_EPOLL)
648 return (eloop->poll_fd = epoll_create1(EPOLL_CLOEXEC));
650 return (eloop->poll_fd = -1);
/* Rebuild the backend registration from scratch: close and reopen the
 * kqueue/epoll descriptor, then re-register every signal (kqueue only,
 * via EVFILT_SIGNAL) and every event's read/write filters.  Needed
 * after fork or when the signal set changes (see eloop_signal_set_cb).
 * A no-op for poll-style backends. */
656 eloop_requeue(struct eloop *eloop)
658 #if defined(HAVE_POLL)
662 #else /* !HAVE_POLL */
663 struct eloop_event *e;
665 #if defined(HAVE_KQUEUE)
668 #elif defined(HAVE_EPOLL)
669 struct epoll_event epe;
672 assert(eloop != NULL);
674 if (eloop->poll_fd != -1)
675 close(eloop->poll_fd);
676 if (eloop_open(eloop) == -1)
678 #if defined (HAVE_KQUEUE)
/* Count kevents needed: one per signal plus one per armed filter. */
679 i = eloop->signals_len;
680 TAILQ_FOREACH(e, &eloop->events, next) {
686 if ((ke = malloc(sizeof(*ke) * i)) == NULL)
/* Signals first... */
689 for (i = 0; i < eloop->signals_len; i++)
690 EV_SET(&ke[i], (uintptr_t)eloop->signals[i],
691 EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));
/* ...then the fd events. */
693 TAILQ_FOREACH(e, &eloop->events, next) {
694 EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
695 EV_ADD, 0, 0, UPTR(e));
698 EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
699 EV_ADD, 0, 0, UPTR(e));
/* Submit the whole batch in a single kevent call. */
704 error = kevent(eloop->poll_fd, ke, LENC(i), NULL, 0, NULL);
707 #elif defined(HAVE_EPOLL)
710 TAILQ_FOREACH(e, &eloop->events, next) {
711 memset(&epe, 0, sizeof(epe));
713 epe.events = EPOLLIN;
715 epe.events |= EPOLLOUT;
717 if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
723 #endif /* HAVE_POLL */
/* Install the signal set and callback, then requeue so the kqueue
 * backend re-registers its EVFILT_SIGNAL filters for the new set.
 * The signals array is borrowed, not copied — the caller must keep it
 * alive for the lifetime of the eloop. */
727 eloop_signal_set_cb(struct eloop *eloop,
728 const int *signals, size_t signals_len,
729 void (*signal_cb)(int, void *), void *signal_cb_ctx)
732 assert(eloop != NULL);
734 eloop->signals = signals;
735 eloop->signals_len = signals_len;
736 eloop->signal_cb = signal_cb;
737 eloop->signal_cb_ctx = signal_cb_ctx;
738 return eloop_requeue(eloop);
/* Shared state between the async-signal handler (eloop_signal3) and
 * the deferred dispatcher (eloop_signal1).  Single static slot: only
 * one pending signal can be staged at a time.
 * NOTE(review): file-scope identifiers beginning with an underscore
 * are reserved to the implementation (C11 7.1.3) — consider renaming. */
742 struct eloop_siginfo {
746 static struct eloop_siginfo _eloop_siginfo;
747 static struct eloop *_eloop;
/* Deferred half of signal delivery: runs from the main loop (not from
 * signal context) and forwards the staged signal number to the user's
 * signal callback. */
750 eloop_signal1(void *arg)
752 struct eloop_siginfo *si = arg;
754 si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
/* Async-signal handler (SA_SIGINFO signature): stages the signal
 * number and defers real work to eloop_signal1 via timeout0. */
758 eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
761 /* So that we can operate safely under a signal we instruct
762 * eloop to pass a copy of the siginfo structure to eloop_signal1
763 * as the very first thing to do. */
764 _eloop_siginfo.eloop = _eloop;
765 _eloop_siginfo.sig = sig;
766 eloop_timeout_add_now(_eloop_siginfo.eloop,
767 eloop_signal1, &_eloop_siginfo);
/* Block the eloop's signal set via sigprocmask (previous mask returned
 * through oldset) and install eloop_signal3 as the SA_SIGINFO handler
 * for each signal; the signals are then only delivered inside the
 * loop's pselect/epoll_pwait/kevent wait. */
772 eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
780 assert(eloop != NULL);
782 sigemptyset(&newset);
783 for (i = 0; i < eloop->signals_len; i++)
784 sigaddset(&newset, eloop->signals[i]);
785 if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
/* Install the staging handler for each masked signal. */
791 memset(&sa, 0, sizeof(sa));
792 sa.sa_sigaction = eloop_signal3;
793 sa.sa_flags = SA_SIGINFO;
794 sigemptyset(&sa.sa_mask);
796 for (i = 0; i < eloop->signals_len; i++) {
797 if (sigaction(eloop->signals[i], &sa, NULL) == -1)
/* Constructor body (signature not visible in this view): verifies the
 * monotonic clock works, zero-allocates the eloop, initialises all four
 * lists and the fd-table sentinel, and opens the backend descriptor.
 * NOTE(review): no NULL check on calloc is visible between lines 814
 * and 816 — presumably elided; confirm against the full source. */
810 /* Check we have a working monotonic clock. */
811 if (clock_gettime(CLOCK_MONOTONIC, &now) == -1)
814 eloop = calloc(1, sizeof(*eloop));
816 TAILQ_INIT(&eloop->events);
/* -1 marks "no fds tracked yet" (see eloop_event_add_rw growth path). */
817 eloop->events_maxfd = -1;
818 TAILQ_INIT(&eloop->free_events);
819 TAILQ_INIT(&eloop->timeouts);
820 TAILQ_INIT(&eloop->free_timeouts);
821 eloop->exitcode = EXIT_FAILURE;
822 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
823 if (eloop_open(eloop) == -1) {
/* Destructor: drain every list (active and free, events and timeouts),
 * release the fd lookup table, close the backend descriptor (or free
 * the pollfd array on poll backends), and free the eloop itself. */
833 void eloop_free(struct eloop *eloop)
835 struct eloop_event *e;
836 struct eloop_timeout *t;
841 free(eloop->event_fds);
842 while ((e = TAILQ_FIRST(&eloop->events))) {
843 TAILQ_REMOVE(&eloop->events, e, next);
846 while ((e = TAILQ_FIRST(&eloop->free_events))) {
847 TAILQ_REMOVE(&eloop->free_events, e, next);
850 while ((t = TAILQ_FIRST(&eloop->timeouts))) {
851 TAILQ_REMOVE(&eloop->timeouts, t, next);
854 while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
855 TAILQ_REMOVE(&eloop->free_timeouts, t, next);
858 #if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
859 close(eloop->poll_fd);
860 #elif defined(HAVE_POLL)
/* The main loop: until eloop_exit is called, run the pending timeout0
 * hook, fire any expired timeout at the head of the (chronological)
 * timeout list, compute the wait interval to the next timeout (or wait
 * forever), block in the backend wait (kevent / epoll_pwait / pollts /
 * poll — `signals` supplies the sigmask to the p-variants), then
 * dispatch the single triggered event's write or read callback.
 * Returns eloop->exitcode. */
867 eloop_start(struct eloop *eloop, sigset_t *signals)
870 struct eloop_event *e;
871 struct eloop_timeout *t;
872 struct timespec now, ts, *tsp;
874 #if defined(HAVE_KQUEUE)
877 #elif defined(HAVE_EPOLL)
878 struct epoll_event epe;
884 assert(eloop != NULL);
891 /* Run all timeouts first. */
892 if (eloop->timeout0) {
/* Clear the slot before calling so the callback may re-arm it. */
893 t0 = eloop->timeout0;
894 eloop->timeout0 = NULL;
895 t0(eloop->timeout0_arg);
/* Head of the list is the earliest timeout; fire it if expired,
 * otherwise its remaining time bounds the wait below. */
898 if ((t = TAILQ_FIRST(&eloop->timeouts))) {
899 clock_gettime(CLOCK_MONOTONIC, &now);
900 if (timespeccmp(&now, &t->when, >)) {
901 TAILQ_REMOVE(&eloop->timeouts, t, next);
903 TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
906 timespecsub(&t->when, &now, &ts);
909 /* No timeouts, so wait forever. */
/* Nothing to wait for at all: the loop is done. */
912 if (tsp == NULL && eloop->events_len == 0)
/* Convert the timespec to int milliseconds for plain poll/epoll_wait,
 * clamping rather than overflowing INT_MAX; rounds nanoseconds up so
 * we never wake early. */
918 else if (tsp->tv_sec > INT_MAX / 1000 ||
919 (tsp->tv_sec == INT_MAX / 1000 &&
920 (tsp->tv_nsec + 999999) / 1000000 > INT_MAX % 1000000))
923 timeout = (int)(tsp->tv_sec * 1000 +
924 (tsp->tv_nsec + 999999) / 1000000);
/* Block in the backend-specific wait (one event at a time). */
927 #if defined(HAVE_KQUEUE)
928 n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
929 #elif defined(HAVE_EPOLL)
931 n = epoll_pwait(eloop->poll_fd, &epe, 1,
934 n = epoll_wait(eloop->poll_fd, &epe, 1, timeout);
935 #elif defined(HAVE_POLL)
937 n = POLLTS(eloop->fds, (nfds_t)eloop->events_len,
940 n = poll(eloop->fds, (nfds_t)eloop->events_len,
949 /* Process any triggered events.
950 * We go back to the start after calling each callback in case
951 * the current event or next event is removed. */
952 #if defined(HAVE_KQUEUE)
/* Signals arrive as EVFILT_SIGNAL kevents on this backend. */
954 if (ke.filter == EVFILT_SIGNAL) {
955 eloop->signal_cb((int)ke.ident,
956 eloop->signal_cb_ctx);
959 e = (struct eloop_event *)ke.udata;
960 if (ke.filter == EVFILT_WRITE) {
961 e->write_cb(e->write_cb_arg);
963 } else if (ke.filter == EVFILT_READ) {
964 e->read_cb(e->read_cb_arg);
968 #elif defined(HAVE_EPOLL)
970 e = (struct eloop_event *)epe.data.ptr;
971 if (epe.events & EPOLLOUT && e->write_cb != NULL) {
972 e->write_cb(e->write_cb_arg);
/* Errors and hangups are delivered to the read callback. */
976 (EPOLLIN | EPOLLERR | EPOLLHUP) &&
979 e->read_cb(e->read_cb_arg);
983 #elif defined(HAVE_POLL)
/* Scan the pollfd array; write side first, then anything else
 * (including error revents) goes to the read callback. */
987 for (i = 0; i < eloop->events_len; i++) {
988 if (eloop->fds[i].revents & POLLOUT) {
989 e = eloop->event_fds[eloop->fds[i].fd];
990 if (e->write_cb != NULL) {
991 e->write_cb(e->write_cb_arg);
995 if (eloop->fds[i].revents) {
996 e = eloop->event_fds[eloop->fds[i].fd];
997 if (e->read_cb != NULL) {
998 e->read_cb(e->read_cb_arg);
1007 return eloop->exitcode;