1 /* SPDX-License-Identifier: BSD-2-Clause */
3 * eloop - portable event based main loop.
4 * Copyright (c) 2006-2020 Roy Marples <roy@marples.name>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #if (defined(__unix__) || defined(unix)) && !defined(USG)
30 #include <sys/param.h>
43 /* config.h should define HAVE_KQUEUE, HAVE_EPOLL, etc. */
44 #if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H)
48 /* Attempt to autodetect kqueue or epoll.
49 * Failing that, fall back to pselect. */
50 #if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL) && !defined(HAVE_PSELECT) && \
51 !defined(HAVE_POLLTS) && !defined(HAVE_PPOLL)
53 /* Assume BSD has a working sys/queue.h and kqueue(2) interface. */
54 #define HAVE_SYS_QUEUE_H
57 #elif defined(__linux__) || defined(__sun)
58 /* Assume Linux and Solaris have a working epoll(3) interface. */
62 /* pselect(2) is a POSIX standard. */
68 /* pollts and ppoll require poll.
69 * pselect is wrapped in a pollts/ppoll style interface
70 * and as such require poll as well. */
71 #if defined(HAVE_PSELECT) || defined(HAVE_POLLTS) || defined(HAVE_PPOLL)
75 #if defined(HAVE_POLLTS)
77 #elif defined(HAVE_PPOLL)
80 #define POLLTS eloop_pollts
81 #define ELOOP_NEED_POLLTS
88 #define UNUSED(a) (void)((a))
92 #define __unused __attribute__((__unused__))
98 #if defined(HAVE_KQUEUE)
99 #include <sys/event.h>
102 /* udata is void * except on NetBSD.
103 * lengths are int except on NetBSD. */
104 #define UPTR(x) ((intptr_t)(x))
108 #define LENC(x) ((int)(x))
110 #elif defined(HAVE_EPOLL)
111 #include <sys/epoll.h>
112 #elif defined(HAVE_POLL)
113 #if defined(HAVE_PSELECT)
114 #include <sys/select.h>
120 #if defined(HAVE_KQUEUE)
121 #pragma message("Compiling eloop with kqueue(2) support.")
122 #elif defined(HAVE_EPOLL)
123 #pragma message("Compiling eloop with epoll(7) support.")
124 #elif defined(HAVE_PSELECT)
125 #pragma message("Compiling eloop with pselect(2) support.")
126 #elif defined(HAVE_PPOLL)
127 #pragma message("Compiling eloop with ppoll(2) support.")
128 #elif defined(HAVE_POLLTS)
129 #pragma message("Compiling eloop with pollts(2) support.")
131 #error Unknown select mechanism for eloop
135 /* Our structures require TAILQ macros, which really every libc should
136 * ship as they are useful beyond belief.
137 * Sadly some libc's don't have sys/queue.h and some that do don't have
138 * the TAILQ_FOREACH macro. For those that don't, the application using
139 * this implementation will need to ship a working queue.h somewhere.
140 * If we don't have sys/queue.h found in config.h, then
141 * allow QUEUE_H to override loading queue.h in the current directory. */
142 #ifndef TAILQ_FOREACH
143 #ifdef HAVE_SYS_QUEUE_H
144 #include <sys/queue.h>
145 #elif defined(QUEUE_H)
146 #define __QUEUE_HEADER(x) #x
147 #define _QUEUE_HEADER(x) __QUEUE_HEADER(x)
148 #include _QUEUE_HEADER(QUEUE_H)
/*
 * time_t is a signed integer of an unspecified size.
 * To adjust for time_t wrapping, we need to work the maximum signed
 * value and use that as a maximum.
 */
#define TIME_MAX ((1ULL << (sizeof(time_t) * NBBY - 1)) - 1)

/* The unsigned maximum is then simple - multiply by two and add one.
 * Fully parenthesized: the previous expansion `(TIME_MAX * 2) + 1`
 * bound incorrectly inside a surrounding multiplication or shift. */
#define UTIME_MAX ((TIME_MAX * 2) + 1)
	/* List linkage on eloop->events / eloop->free_events.
	 * NOTE(review): the struct eloop_event opener and fd member are
	 * elided from this chunk. */
	TAILQ_ENTRY(eloop_event) next;
	/* Invoked with read_cb_arg when the descriptor is readable. */
	void (*read_cb)(void *);
	/* Invoked with write_cb_arg when the descriptor is writable. */
	void (*write_cb)(void *);
/* A pending timeout; the list on eloop->timeouts is kept in
 * chronological order (see eloop_q_timeout_add). */
struct eloop_timeout {
	TAILQ_ENTRY(eloop_timeout) next;	/* list linkage */
	unsigned int seconds;		/* whole seconds remaining */
	unsigned int nseconds;		/* sub-second remainder */
	void (*callback)(void *);	/* run when the timeout expires */
	/* Active monitored events, plus a free pool to avoid malloc churn. */
	TAILQ_HEAD (event_head, eloop_event) events;
	struct event_head free_events;
	/* Direct fd -> event lookup table, indexed by descriptor number. */
	struct eloop_event **event_fds;
	/* Pending timeouts (soonest first) and their free pool. */
	TAILQ_HEAD (timeout_head, eloop_timeout) timeouts;
	struct timeout_head free_timeouts;
	/* Zero-delay callback run before anything else on the next cycle. */
	void (*timeout0)(void *);
	/* Signal dispatch callback; receives the signal number. */
	void (*signal_cb)(int, void *);
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
#elif defined(HAVE_POLL)
#ifdef HAVE_REALLOCARRAY
#define eloop_realloca reallocarray
#else
/* Handy routine to check for potential overflow.
 * reallocarray(3) and reallocarr(3) are not portable. */
#define SQRT_SIZE_MAX (((size_t)1) << (sizeof(size_t) * CHAR_BIT / 2))
/*
 * realloc(ptr, n * size) with an overflow check on the multiplication.
 * Returns NULL and sets errno to EOVERFLOW if n * size would overflow.
 */
static void *
eloop_realloca(void *ptr, size_t n, size_t size)
{

	/* Only pay for the division when an operand is large enough for
	 * the product to overflow.  Guard size == 0 explicitly: the old
	 * check divided by zero for huge n with size == 0, and n * 0
	 * can never overflow anyway. */
	if (size != 0 && (n | size) >= SQRT_SIZE_MAX &&
	    n > SIZE_MAX / size) {
		errno = EOVERFLOW;
		return NULL;
	}
	return realloc(ptr, n * size);
}
#endif
/* Rebuild the pollfd array from the event list (poll backend only).
 * NOTE(review): this chunk elides the index declaration/increment and
 * the read_cb/write_cb guards around the POLLIN/POLLOUT assignments. */
eloop_event_setup_fds(struct eloop *eloop)
	struct eloop_event *e;
	TAILQ_FOREACH(e, &eloop->events, next) {
		eloop->fds[i].fd = e->fd;
		eloop->fds[i].events = 0;
		/* presumably gated on e->read_cb — elided here */
			eloop->fds[i].events |= POLLIN;
		/* presumably gated on e->write_cb — elided here */
			eloop->fds[i].events |= POLLOUT;
		eloop->fds[i].revents = 0;
#ifdef ELOOP_NEED_POLLTS
/* Wrapper around pselect, to imitate the NetBSD pollts call. */
eloop_pollts(struct pollfd * fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
	fd_set read_fds, write_fds;
	/* Translate each pollfd into the select(2) fd_sets, tracking the
	 * highest descriptor for pselect's first argument. */
	for (n = 0; n < nfds; n++) {
		if (fds[n].events & POLLIN) {
			FD_SET(fds[n].fd, &read_fds);
			if (fds[n].fd > maxfd)
		if (fds[n].events & POLLOUT) {
			FD_SET(fds[n].fd, &write_fds);
			if (fds[n].fd > maxfd)
	r = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask);
	/* Map ready descriptors back into revents, poll(2) style. */
	for (n = 0; n < nfds; n++) {
		FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
		if (FD_ISSET(fds[n].fd, &write_fds))
			fds[n].revents |= POLLOUT;
#else /* !HAVE_POLL */
#define eloop_event_setup_fds(a) {}
#endif /* HAVE_POLL */
/* Return tsp - usp in whole seconds, storing the nanosecond remainder
 * through nsp.  Negative tv_sec values are mapped into the unsigned
 * range (UTIME_MAX) so a wrapped monotonic clock still diffs sanely. */
eloop_timespec_diff(const struct timespec *tsp, const struct timespec *usp,
	unsigned long long tsecs, usecs, secs;
	if (tsp->tv_sec < 0) /* time wrapped */
		tsecs = UTIME_MAX - (unsigned long long)(-tsp->tv_sec);
		tsecs = (unsigned long long)tsp->tv_sec;
	if (usp->tv_sec < 0) /* time wrapped */
		usecs = UTIME_MAX - (unsigned long long)(-usp->tv_sec);
		usecs = (unsigned long long)usp->tv_sec;
	if (usecs > tsecs) /* time wrapped */
		secs = (UTIME_MAX - usecs) + tsecs;
		secs = tsecs - usecs;
	/* Borrow a second if the nanosecond subtraction went negative. */
	nsecs = tsp->tv_nsec - usp->tv_nsec;
		nsecs += NSEC_PER_SEC;
	*nsp = (unsigned int)nsecs;
/* Subtract the time elapsed since eloop->now from every pending
 * timeout, clamping expired ones to zero. */
eloop_reduce_timers(struct eloop *eloop)
	unsigned long long secs;
	struct eloop_timeout *t;
	clock_gettime(CLOCK_MONOTONIC, &now);
	secs = eloop_timespec_diff(&now, &eloop->now, &nsecs);
	TAILQ_FOREACH(t, &eloop->timeouts, next) {
		/* More time passed than remains: timeout is due now. */
		if (secs > t->seconds) {
			t->seconds -= (unsigned int)secs;
			/* Borrow a second for the nanosecond subtraction. */
			if (nsecs > t->nseconds) {
				t->nseconds = NSEC_PER_SEC
				    - (nsecs - t->nseconds);
			t->nseconds -= nsecs;
/* Register (or update) read/write callbacks for fd.  At least one of
 * read_cb/write_cb must be non-NULL; an existing registration for the
 * same fd is modified in place.  Returns 0 on success, -1 on error.
 * NOTE(review): many interior lines of this function are elided from
 * this chunk (error paths, index declarations, closing braces). */
eloop_event_add_rw(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg,
    void (*write_cb)(void *), void *write_cb_arg)
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#elif defined(HAVE_POLL)
	assert(eloop != NULL);
	assert(read_cb != NULL || write_cb != NULL);
	/* Pre-build the epoll event; EPOLLOUT presumably gated on
	 * write_cb — the condition line is elided here. */
	memset(&epe, 0, sizeof(epe));
	epe.events = EPOLLIN;
	epe.events |= EPOLLOUT;
	/* We should only have one callback monitoring the fd. */
	if (fd <= eloop->events_maxfd) {
		if ((e = eloop->event_fds[fd]) != NULL) {
#if defined(HAVE_KQUEUE)
			/* Modify the existing kevent registration: always
			 * (re-)add the read filter; add or delete the write
			 * filter depending on the new/old callbacks. */
			EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_ADD, 0, 0, UPTR(e));
			else if (e->write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_DELETE, 0, 0, UPTR(e));
			error = kevent(eloop->poll_fd, ke,
			    e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
			error = epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD,
			/* Update the stored callbacks in place. */
			e->read_cb = read_cb;
			e->read_cb_arg = read_cb_arg;
			e->write_cb = write_cb;
			e->write_cb_arg = write_cb_arg;
			eloop_event_setup_fds(eloop);
		/* Grow the fd -> event lookup table. */
		struct eloop_event **new_fds;
		/* Reserve ourself and 4 more. */
		new_fds = eloop_realloca(eloop->event_fds,
		    ((size_t)maxfd + 1), sizeof(*eloop->event_fds));
		/* set new entries NULL as the fd's may not be contiguous. */
		for (i = maxfd; i > eloop->events_maxfd; i--)
		eloop->event_fds = new_fds;
		eloop->events_maxfd = maxfd;
	/* Allocate a new event if no free ones already allocated. */
	if ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
		e = malloc(sizeof(*e));
	/* Ensure we can actually listen to it. */
	if (eloop->events_len > eloop->fds_len) {
		nfds = eloop_realloca(eloop->fds,
		    (eloop->fds_len + 5), sizeof(*eloop->fds));
	/* Now populate the structure and add it to the list. */
	e->read_cb = read_cb;
	e->read_cb_arg = read_cb_arg;
	e->write_cb = write_cb;
	e->write_cb_arg = write_cb_arg;
#if defined(HAVE_KQUEUE)
	/* Fresh registration: read filter always, write filter only if
	 * a write callback was supplied. */
	EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
	    EV_ADD, 0, 0, UPTR(e));
	if (write_cb != NULL)
		EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
		    EV_ADD, 0, 0, UPTR(e));
	if (kevent(eloop->poll_fd, ke, write_cb ? 2 : 1, NULL, 0, NULL) == -1)
#elif defined(HAVE_EPOLL)
	if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
	TAILQ_INSERT_HEAD(&eloop->events, e, next);
	eloop->event_fds[e->fd] = e;
	eloop_event_setup_fds(eloop);
	/* Error path: return the event to the free pool. */
	TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
/* Convenience wrapper: monitor fd for reading only. */
eloop_event_add(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg)
	return eloop_event_add_rw(eloop, fd, read_cb, read_cb_arg, NULL, NULL);
/* Convenience wrapper: monitor fd for writing only. */
eloop_event_add_w(struct eloop *eloop, int fd,
    void (*write_cb)(void *), void *write_cb_arg)
	return eloop_event_add_rw(eloop, fd, NULL,NULL, write_cb, write_cb_arg);
/* Remove the write callback for fd, or the whole event.  With
 * write_only set (and a read callback still present) only the write
 * half is dropped; otherwise the event is fully deregistered.
 * NOTE(review): interior lines (early returns, braces) are elided. */
eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
	assert(eloop != NULL);
	/* Nothing registered for this fd. */
	if (fd > eloop->events_maxfd ||
	    (e = eloop->event_fds[fd]) == NULL)
		if (e->write_cb == NULL)
		if (e->read_cb == NULL)
		e->write_cb_arg = NULL;
#if defined(HAVE_KQUEUE)
		/* Drop only the write filter from the kernel queue. */
		EV_SET(&ke[0], (uintptr_t)e->fd,
		    EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
		kevent(eloop->poll_fd, ke, 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
		/* Re-arm for reading only. */
		memset(&epe, 0, sizeof(epe));
		epe.events = EPOLLIN;
		epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD, fd, &epe);
		eloop_event_setup_fds(eloop);
		/* Full removal: unlink and recycle the event. */
		TAILQ_REMOVE(&eloop->events, e, next);
		eloop->event_fds[e->fd] = NULL;
		TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
#if defined(HAVE_KQUEUE)
		EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
		    EV_DELETE, 0, 0, UPTR(NULL));
			EV_SET(&ke[1], (uintptr_t)fd,
			    EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
		kevent(eloop->poll_fd, ke, e->write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
		/* NULL event is safe because we
		 * rely on epoll_pwait which was added
		 * after the delete without event was fixed. */
		epoll_ctl(eloop->poll_fd, EPOLL_CTL_DEL, fd, NULL);
	eloop_event_setup_fds(eloop);
 * This implementation should cope with UINT_MAX seconds on a system
 * where time_t is INT32_MAX. It should also cope with the monotonic timer
 * wrapping, although this is highly unlikely.
 * unsigned int should match or be greater than any on wire specified timeout.
/* Add (or replace) a timeout identified by (callback, arg) on the
 * given queue, keeping the timeout list in chronological order.
 * NOTE(review): interior lines of this function are elided here. */
eloop_q_timeout_add(struct eloop *eloop, int queue,
    unsigned int seconds, unsigned int nseconds,
    void (*callback)(void *), void *arg)
	struct eloop_timeout *t, *tt = NULL;
	assert(eloop != NULL);
	assert(callback != NULL);
	assert(nseconds <= NSEC_PER_SEC);
	/* Remove existing timeout if present. */
	TAILQ_FOREACH(t, &eloop->timeouts, next) {
		if (t->callback == callback && t->arg == arg) {
			TAILQ_REMOVE(&eloop->timeouts, t, next);
	/* No existing, so allocate or grab one from the free pool. */
	if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
		TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		if ((t = malloc(sizeof(*t))) == NULL)
	/* Bring all timers up to date before inserting the new one. */
	eloop_reduce_timers(eloop);
	t->seconds = seconds;
	t->nseconds = nseconds;
	t->callback = callback;
	/* The timeout list should be in chronological order,
	TAILQ_FOREACH(tt, &eloop->timeouts, next) {
		if (t->seconds < tt->seconds ||
		    (t->seconds == tt->seconds && t->nseconds < tt->nseconds))
			TAILQ_INSERT_BEFORE(tt, t, next);
	/* Nothing later than us: append. */
	TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
641 eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
642 const struct timespec *when, void (*callback)(void *), void *arg)
645 if (when->tv_sec < 0 || (unsigned long)when->tv_sec > UINT_MAX) {
649 if (when->tv_nsec < 0 || when->tv_nsec > NSEC_PER_SEC) {
654 return eloop_q_timeout_add(eloop, queue,
655 (unsigned int)when->tv_sec, (unsigned int)when->tv_sec,
/* Convenience wrapper: whole-second timeout, zero nanoseconds. */
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, unsigned int seconds,
    void (*callback)(void *), void *arg)
	return eloop_q_timeout_add(eloop, queue, seconds, 0, callback, arg);
/* Convenience wrapper: timeout expressed in milliseconds, split into
 * the internal seconds + nanoseconds representation. */
eloop_q_timeout_add_msec(struct eloop *eloop, int queue, unsigned long when,
    void (*callback)(void *), void *arg)
	unsigned long seconds, nseconds;
	seconds = when / MSEC_PER_SEC;
	/* Reject values that cannot fit an unsigned int second count. */
	if (seconds > UINT_MAX) {
	nseconds = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
	return eloop_q_timeout_add(eloop, queue,
	    (unsigned int)seconds, (unsigned int)nseconds, callback, arg);
#if !defined(HAVE_KQUEUE)
/* Schedule callback to run first on the next loop iteration, before
 * any other timeout.  Only one such callback may be pending at a time
 * (hence the assert). */
eloop_timeout_add_now(struct eloop *eloop,
    void (*callback)(void *), void *arg)
	assert(eloop->timeout0 == NULL);
	eloop->timeout0 = callback;
	eloop->timeout0_arg = arg;
/* Remove timeouts matching (queue, callback, arg); queue == 0 matches
 * any queue and callback == NULL matches any callback.
 * NOTE(review): the arg comparison line appears elided in this chunk. */
eloop_q_timeout_delete(struct eloop *eloop, int queue,
    void (*callback)(void *), void *arg)
	struct eloop_timeout *t, *tt;
	assert(eloop != NULL);
	/* SAFE variant: t may be unlinked while iterating. */
	TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
		if ((queue == 0 || t->queue == queue) &&
		    (!callback || t->callback == callback))
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
/* Record the exit code and flag the loop to stop (exit flag line is
 * elided from this chunk). */
eloop_exit(struct eloop *eloop, int code)
	assert(eloop != NULL);
	eloop->exitcode = code;
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
/* Open the kernel polling descriptor (kqueue or epoll), close-on-exec.
 * Returns the descriptor, or -1 on failure. */
eloop_open(struct eloop *eloop)
#if defined(HAVE_KQUEUE1)
	return (eloop->poll_fd = kqueue1(O_CLOEXEC));
#elif defined(HAVE_KQUEUE)
	/* Plain kqueue(): set FD_CLOEXEC manually via fcntl. */
	if ((eloop->poll_fd = kqueue()) == -1)
	if ((i = fcntl(eloop->poll_fd, F_GETFD, 0)) == -1 ||
	    fcntl(eloop->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
		close(eloop->poll_fd);
	return eloop->poll_fd;
#elif defined (HAVE_EPOLL)
	return (eloop->poll_fd = epoll_create1(EPOLL_CLOEXEC));
	return (eloop->poll_fd = -1);
/* Rebuild the kernel-side polling state: close and reopen the kqueue /
 * epoll descriptor, then re-register every signal and event.  A no-op
 * for the poll backend.  NOTE(review): interior lines elided. */
eloop_requeue(struct eloop *eloop)
#if defined(HAVE_POLL)
#else /* !HAVE_POLL */
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
	assert(eloop != NULL);
	if (eloop->poll_fd != -1)
		close(eloop->poll_fd);
	if (eloop_open(eloop) == -1)
#if defined (HAVE_KQUEUE)
	/* Count kevents needed: one per signal plus one (or two, with a
	 * write callback) per event — the counting body is elided. */
	i = eloop->signals_len;
	TAILQ_FOREACH(e, &eloop->events, next) {
	if ((ke = malloc(sizeof(*ke) * i)) == NULL)
	for (i = 0; i < eloop->signals_len; i++)
		EV_SET(&ke[i], (uintptr_t)eloop->signals[i],
		    EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));
	TAILQ_FOREACH(e, &eloop->events, next) {
		EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
			EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
			    EV_ADD, 0, 0, UPTR(e));
	/* Submit the whole batch in one kevent call. */
	error = kevent(eloop->poll_fd, ke, LENC(i), NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
	/* Re-add each fd to the fresh epoll instance. */
	TAILQ_FOREACH(e, &eloop->events, next) {
		memset(&epe, 0, sizeof(epe));
		epe.events = EPOLLIN;
		epe.events |= EPOLLOUT;
		if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
#endif /* HAVE_POLL */
/* Install the set of signals to monitor and the callback to dispatch
 * them to, then requeue so the kernel state matches.  The signals
 * array is borrowed, not copied — caller keeps it alive. */
eloop_signal_set_cb(struct eloop *eloop,
    const int *signals, size_t signals_len,
    void (*signal_cb)(int, void *), void *signal_cb_ctx)
	assert(eloop != NULL);
	eloop->signals = signals;
	eloop->signals_len = signals_len;
	eloop->signal_cb = signal_cb;
	eloop->signal_cb_ctx = signal_cb_ctx;
	return eloop_requeue(eloop);
/* Scratch state used to hand a caught signal from the async handler
 * to the main loop (members elided in this chunk). */
struct eloop_siginfo {
/* Single static instances: only one signal hand-off can be in flight. */
static struct eloop_siginfo _eloop_siginfo;
static struct eloop *_eloop;
/* Deferred half of signal handling: runs in normal (non-signal)
 * context and forwards the signal number to the user's callback. */
eloop_signal1(void *arg)
	struct eloop_siginfo *si = arg;
	si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
/* Async-signal handler: records the signal and schedules
 * eloop_signal1 to run first on the next loop iteration. */
eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
	/* So that we can operate safely under a signal we instruct
	 * eloop to pass a copy of the siginfo structure to handle_signal1
	 * as the very first thing to do. */
	_eloop_siginfo.eloop = _eloop;
	_eloop_siginfo.sig = sig;
	eloop_timeout_add_now(_eloop_siginfo.eloop,
	    eloop_signal1, &_eloop_siginfo);
/* Block the monitored signals (saving the previous mask in oldset)
 * and install eloop_signal3 as the SA_SIGINFO handler for each.
 * NOTE(review): local declarations and error returns are elided. */
eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
	assert(eloop != NULL);
	sigemptyset(&newset);
	for (i = 0; i < eloop->signals_len; i++)
		sigaddset(&newset, eloop->signals[i]);
	if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = eloop_signal3;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	for (i = 0; i < eloop->signals_len; i++) {
		if (sigaction(eloop->signals[i], &sa, NULL) == -1)
	/* Allocate and initialise a fresh loop (presumably inside
	 * eloop_new — the signature is elided from this chunk). */
	eloop = calloc(1, sizeof(*eloop));
	/* Check we have a working monotonic clock. */
	if (clock_gettime(CLOCK_MONOTONIC, &eloop->now) == -1) {
	TAILQ_INIT(&eloop->events);
	eloop->events_maxfd = -1;
	TAILQ_INIT(&eloop->free_events);
	TAILQ_INIT(&eloop->timeouts);
	TAILQ_INIT(&eloop->free_timeouts);
	/* Default to failure until eloop_exit sets something else. */
	eloop->exitcode = EXIT_FAILURE;
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	if (eloop_open(eloop) == -1) {
/* Release everything the loop owns — the fd table, all active and
 * pooled events and timeouts — and reset the bookkeeping fields.
 * (The free() calls inside the while loops are elided here.) */
eloop_clear(struct eloop *eloop)
	struct eloop_event *e;
	struct eloop_timeout *t;
	free(eloop->event_fds);
	eloop->event_fds = NULL;
	eloop->events_len = 0;
	eloop->events_maxfd = -1;
	eloop->signals = NULL;
	eloop->signals_len = 0;
	while ((e = TAILQ_FIRST(&eloop->events))) {
		TAILQ_REMOVE(&eloop->events, e, next);
	while ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
	while ((t = TAILQ_FIRST(&eloop->timeouts))) {
		TAILQ_REMOVE(&eloop->timeouts, t, next);
	while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
		TAILQ_REMOVE(&eloop->free_timeouts, t, next);
#if defined(HAVE_POLL)
/* Destroy the loop: clear its contents, close the kernel polling
 * descriptor where one exists, then free the structure itself. */
eloop_free(struct eloop *eloop)
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	close(eloop->poll_fd);
/* Run the event loop until eloop_exit is called: fire due timeouts,
 * wait on the backend (kqueue/epoll/poll) with the next timeout as
 * the wait limit, then dispatch ready callbacks.  Returns the code
 * set by eloop_exit.  NOTE(review): loop framing, error handling and
 * several condition lines are elided from this chunk. */
eloop_start(struct eloop *eloop, sigset_t *signals)
	struct eloop_event *e;
	struct eloop_timeout *t;
#if defined(HAVE_KQUEUE)
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#if defined(HAVE_KQUEUE) || defined(HAVE_POLL)
	struct timespec ts, *tsp;
	assert(eloop != NULL);
	/* Run all timeouts first. */
	if (eloop->timeout0) {
		/* Take a local copy so the callback may re-arm timeout0. */
		t0 = eloop->timeout0;
		eloop->timeout0 = NULL;
		t0(eloop->timeout0_arg);
	t = TAILQ_FIRST(&eloop->timeouts);
	/* Nothing to wait for at all: the loop is done. */
	if (t == NULL && eloop->events_len == 0)
	eloop_reduce_timers(eloop);
	/* Fire any timeout that is already due. */
	if (t != NULL && t->seconds == 0 && t->nseconds == 0) {
		TAILQ_REMOVE(&eloop->timeouts, t, next);
		t->callback(t->arg);
		TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
#if defined(HAVE_KQUEUE) || defined(HAVE_POLL)
		/* Clamp the wait to INT_MAX seconds for the backend. */
		if (t->seconds > INT_MAX) {
			ts.tv_sec = (time_t)INT_MAX;
			ts.tv_sec = (time_t)t->seconds;
			ts.tv_nsec = (long)t->nseconds;
		/* Millisecond conversion for epoll; guard against
		 * overflowing the int timeout, rounding nseconds up. */
		if (t->seconds > INT_MAX / 1000 ||
		    (t->seconds == INT_MAX / 1000 &&
		    ((t->nseconds + 999999) / 1000000
		    > INT_MAX % 1000000)))
			timeout = (int)(t->seconds * 1000 +
			    (t->nseconds + 999999) / 1000000);
#if defined(HAVE_KQUEUE) || defined(HAVE_POLL)
#if defined(HAVE_KQUEUE)
		n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
#elif defined(HAVE_EPOLL)
		n = epoll_pwait(eloop->poll_fd, &epe, 1,
		n = epoll_wait(eloop->poll_fd, &epe, 1, timeout);
#elif defined(HAVE_POLL)
		n = POLLTS(eloop->fds, (nfds_t)eloop->events_len,
		n = poll(eloop->fds, (nfds_t)eloop->events_len,
		/* Process any triggered events.
		 * We go back to the start after calling each callback in case
		 * the current event or next event is removed. */
#if defined(HAVE_KQUEUE)
		if (ke.filter == EVFILT_SIGNAL) {
			eloop->signal_cb((int)ke.ident,
			    eloop->signal_cb_ctx);
			e = (struct eloop_event *)ke.udata;
			if (ke.filter == EVFILT_WRITE && e->write_cb != NULL)
				e->write_cb(e->write_cb_arg);
			else if (ke.filter == EVFILT_READ && e->read_cb != NULL)
				e->read_cb(e->read_cb_arg);
#elif defined(HAVE_EPOLL)
		e = (struct eloop_event *)epe.data.ptr;
		if (epe.events & EPOLLOUT && e->write_cb != NULL)
			e->write_cb(e->write_cb_arg);
		else if (epe.events & (EPOLLIN | EPOLLERR | EPOLLHUP) &&
			e->read_cb(e->read_cb_arg);
#elif defined(HAVE_POLL)
		/* Scan the pollfd array: writes first, then any other
		 * revents bits treated as readable/error conditions. */
		for (i = 0; i < eloop->events_len; i++) {
			if (eloop->fds[i].revents & POLLOUT) {
				e = eloop->event_fds[eloop->fds[i].fd];
				if (e->write_cb != NULL) {
					e->write_cb(e->write_cb_arg);
			if (eloop->fds[i].revents) {
				e = eloop->event_fds[eloop->fds[i].fd];
				if (e->read_cb != NULL) {
					e->read_cb(e->read_cb_arg);
	return eloop->exitcode;