Update to dhcpcd-9.1.0 with the following changes:
contrib/dhcpcd/src/eloop.c
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * eloop - portable event based main loop.
 * Copyright (c) 2006-2020 Roy Marples <roy@marples.name>
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if (defined(__unix__) || defined(unix)) && !defined(USG)
#include <sys/param.h>
#endif
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* config.h should define HAVE_KQUEUE, HAVE_EPOLL, etc. */
#if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H)
#include "config.h"
#endif

/* Attempt to autodetect kqueue or epoll.
 * Failing that, fall back to pselect. */
#if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL) && !defined(HAVE_PSELECT) && \
    !defined(HAVE_POLLTS) && !defined(HAVE_PPOLL)
#if defined(BSD)
/* Assume BSD has a working sys/queue.h and kqueue(2) interface. */
#define HAVE_SYS_QUEUE_H
#define HAVE_KQUEUE
#define WARN_SELECT
#elif defined(__linux__) || defined(__sun)
/* Assume Linux and Solaris have a working epoll(3) interface. */
#define HAVE_EPOLL
#define WARN_SELECT
#else
/* pselect(2) is a POSIX standard. */
#define HAVE_PSELECT
#define WARN_SELECT
#endif
#endif

/* pollts and ppoll require poll.
 * pselect is wrapped in a pollts/ppoll style interface
 * and as such requires poll as well. */
#if defined(HAVE_PSELECT) || defined(HAVE_POLLTS) || defined(HAVE_PPOLL)
#ifndef HAVE_POLL
#define HAVE_POLL
#endif
#if defined(HAVE_POLLTS)
#define POLLTS pollts
#elif defined(HAVE_PPOLL)
#define POLLTS ppoll
#else
#define POLLTS eloop_pollts
#define ELOOP_NEED_POLLTS
#endif
#endif

#include "eloop.h"

#ifndef UNUSED
#define UNUSED(a) (void)((a))
#endif
#ifndef __unused
#ifdef __GNUC__
#define __unused   __attribute__((__unused__))
#else
#define __unused
#endif
#endif

#if defined(HAVE_KQUEUE)
#include <sys/event.h>
#include <fcntl.h>
#ifdef __NetBSD__
/* udata is void * except on NetBSD.
 * lengths are int except on NetBSD. */
#define UPTR(x) ((intptr_t)(x))
#define LENC(x) (x)
#else
#define UPTR(x) (x)
#define LENC(x) ((int)(x))
#endif
#elif defined(HAVE_EPOLL)
#include <sys/epoll.h>
#elif defined(HAVE_POLL)
#if defined(HAVE_PSELECT)
#include <sys/select.h>
#endif
#include <poll.h>
#endif

#ifdef WARN_SELECT
#if defined(HAVE_KQUEUE)
#pragma message("Compiling eloop with kqueue(2) support.")
#elif defined(HAVE_EPOLL)
#pragma message("Compiling eloop with epoll(7) support.")
#elif defined(HAVE_PSELECT)
#pragma message("Compiling eloop with pselect(2) support.")
#elif defined(HAVE_PPOLL)
#pragma message("Compiling eloop with ppoll(2) support.")
#elif defined(HAVE_POLLTS)
#pragma message("Compiling eloop with pollts(2) support.")
#else
#error Unknown select mechanism for eloop
#endif
#endif

/* Our structures require TAILQ macros, which really every libc should
 * ship as they are useful beyond belief.
 * Sadly some libcs don't have sys/queue.h and some that do don't have
 * the TAILQ_FOREACH macro. For those that don't, the application using
 * this implementation will need to ship a working queue.h somewhere.
 * If config.h does not tell us that sys/queue.h exists, then
 * allow QUEUE_H to override loading queue.h from the current directory. */
#ifndef TAILQ_FOREACH
#ifdef HAVE_SYS_QUEUE_H
#include <sys/queue.h>
#elif defined(QUEUE_H)
#define __QUEUE_HEADER(x) #x
#define _QUEUE_HEADER(x) __QUEUE_HEADER(x)
#include _QUEUE_HEADER(QUEUE_H)
#else
#include "queue.h"
#endif
#endif

/*
 * time_t is a signed integer of an unspecified size.
 * To adjust for time_t wrapping, we need to work out the maximum signed
 * value and use that as a maximum.
 */
#ifndef TIME_MAX
#define TIME_MAX        ((1ULL << (sizeof(time_t) * NBBY - 1)) - 1)
#endif
/* The unsigned maximum is then simple - multiply by two and add one. */
#ifndef UTIME_MAX
#define UTIME_MAX       ((TIME_MAX * 2) + 1)
#endif
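/*
 * Worked example (illustrative, assuming a 32-bit time_t and NBBY == 8):
 *   TIME_MAX  = (1ULL << (4 * 8 - 1)) - 1 = 0x7fffffff
 *   UTIME_MAX = 0x7fffffff * 2 + 1        = 0xffffffff
 */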

struct eloop_event {
        TAILQ_ENTRY(eloop_event) next;
        int fd;
        void (*read_cb)(void *);
        void *read_cb_arg;
        void (*write_cb)(void *);
        void *write_cb_arg;
};

struct eloop_timeout {
        TAILQ_ENTRY(eloop_timeout) next;
        unsigned int seconds;
        unsigned int nseconds;
        void (*callback)(void *);
        void *arg;
        int queue;
};

struct eloop {
        size_t events_len;
        TAILQ_HEAD (event_head, eloop_event) events;
        struct event_head free_events;
        int events_maxfd;
        struct eloop_event **event_fds;

        struct timespec now;
        TAILQ_HEAD (timeout_head, eloop_timeout) timeouts;
        struct timeout_head free_timeouts;

        void (*timeout0)(void *);
        void *timeout0_arg;
        const int *signals;
        size_t signals_len;
        void (*signal_cb)(int, void *);
        void *signal_cb_ctx;

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
        int poll_fd;
#elif defined(HAVE_POLL)
        struct pollfd *fds;
        size_t fds_len;
#endif

        int exitnow;
        int exitcode;
};

#ifdef HAVE_REALLOCARRAY
#define eloop_realloca  reallocarray
#else
/* Handy routine to check for potential overflow.
 * reallocarray(3) and reallocarr(3) are not portable. */
#define SQRT_SIZE_MAX (((size_t)1) << (sizeof(size_t) * CHAR_BIT / 2))
static void *
eloop_realloca(void *ptr, size_t n, size_t size)
{

        if ((n | size) >= SQRT_SIZE_MAX && n > SIZE_MAX / size) {
                errno = EOVERFLOW;
                return NULL;
        }
        return realloc(ptr, n * size);
}
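/*
 * A note on the guard above: this is the usual reallocarray(3)
 * overflow test. If both n and size are below SQRT_SIZE_MAX
 * (1 << 32 with a 64-bit size_t), their product cannot overflow
 * and the SIZE_MAX / size division is skipped entirely; the exact
 * check only runs when one operand is suspiciously large.
 */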
#endif

#ifdef HAVE_POLL
static void
eloop_event_setup_fds(struct eloop *eloop)
{
        struct eloop_event *e;
        size_t i;

        i = 0;
        TAILQ_FOREACH(e, &eloop->events, next) {
                eloop->fds[i].fd = e->fd;
                eloop->fds[i].events = 0;
                if (e->read_cb)
                        eloop->fds[i].events |= POLLIN;
                if (e->write_cb)
                        eloop->fds[i].events |= POLLOUT;
                eloop->fds[i].revents = 0;
                i++;
        }
}

#ifdef ELOOP_NEED_POLLTS
/* Wrapper around pselect, to imitate the NetBSD pollts call. */
static int
eloop_pollts(struct pollfd *fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
{
        fd_set read_fds, write_fds;
        nfds_t n;
        int maxfd, r;

        FD_ZERO(&read_fds);
        FD_ZERO(&write_fds);
        maxfd = 0;
        for (n = 0; n < nfds; n++) {
                if (fds[n].events & POLLIN) {
                        FD_SET(fds[n].fd, &read_fds);
                        if (fds[n].fd > maxfd)
                                maxfd = fds[n].fd;
                }
                if (fds[n].events & POLLOUT) {
                        FD_SET(fds[n].fd, &write_fds);
                        if (fds[n].fd > maxfd)
                                maxfd = fds[n].fd;
                }
        }

        r = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask);
        if (r > 0) {
                for (n = 0; n < nfds; n++) {
                        fds[n].revents =
                            FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
                        if (FD_ISSET(fds[n].fd, &write_fds))
                                fds[n].revents |= POLLOUT;
                }
        }

        return r;
}
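/*
 * Caveat (our observation, not an upstream claim): because this shim
 * is built on pselect(2) it can only ever report POLLIN and POLLOUT;
 * conditions a native ppoll(2) would flag as POLLERR, POLLHUP or
 * POLLNVAL generally surface here as readable descriptors instead.
 */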
#endif /* pollts */
#else /* !HAVE_POLL */
#define eloop_event_setup_fds(a) {}
#endif /* HAVE_POLL */

unsigned long long
eloop_timespec_diff(const struct timespec *tsp, const struct timespec *usp,
    unsigned int *nsp)
{
        unsigned long long tsecs, usecs, secs;
        long nsecs;

        if (tsp->tv_sec < 0) /* time wrapped */
                tsecs = UTIME_MAX - (unsigned long long)(-tsp->tv_sec);
        else
                tsecs = (unsigned long long)tsp->tv_sec;
        if (usp->tv_sec < 0) /* time wrapped */
                usecs = UTIME_MAX - (unsigned long long)(-usp->tv_sec);
        else
                usecs = (unsigned long long)usp->tv_sec;

        if (usecs > tsecs) /* time wrapped */
                secs = (UTIME_MAX - usecs) + tsecs;
        else
                secs = tsecs - usecs;

        nsecs = tsp->tv_nsec - usp->tv_nsec;
        if (nsecs < 0) {
                if (secs == 0)
                        nsecs = 0;
                else {
                        secs--;
                        nsecs += NSEC_PER_SEC;
                }
        }
        if (nsp != NULL)
                *nsp = (unsigned int)nsecs;
        return secs;
}
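/*
 * Worked example: tsp = { 5, 100000000 } minus usp = { 3, 900000000 }
 * first gives secs = 2, then borrows a second to cover the negative
 * nanosecond difference, returning secs = 1 with *nsp = 200000000,
 * i.e. 1.2 seconds.
 */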

static void
eloop_reduce_timers(struct eloop *eloop)
{
        struct timespec now;
        unsigned long long secs;
        unsigned int nsecs;
        struct eloop_timeout *t;

        clock_gettime(CLOCK_MONOTONIC, &now);
        secs = eloop_timespec_diff(&now, &eloop->now, &nsecs);

        TAILQ_FOREACH(t, &eloop->timeouts, next) {
                if (secs > t->seconds) {
                        t->seconds = 0;
                        t->nseconds = 0;
                } else {
                        t->seconds -= (unsigned int)secs;
                        if (nsecs > t->nseconds) {
                                if (t->seconds == 0)
                                        t->nseconds = 0;
                                else {
                                        t->seconds--;
                                        t->nseconds = NSEC_PER_SEC
                                            - (nsecs - t->nseconds);
                                }
                        } else
                                t->nseconds -= nsecs;
                }
        }

        eloop->now = now;
}

int
eloop_event_add_rw(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg,
    void (*write_cb)(void *), void *write_cb_arg)
{
        struct eloop_event *e;
#if defined(HAVE_KQUEUE)
        struct kevent ke[2];
#elif defined(HAVE_EPOLL)
        struct epoll_event epe;
#elif defined(HAVE_POLL)
        struct pollfd *nfds;
#endif

        assert(eloop != NULL);
        assert(read_cb != NULL || write_cb != NULL);
        if (fd == -1) {
                errno = EINVAL;
                return -1;
        }

#ifdef HAVE_EPOLL
        memset(&epe, 0, sizeof(epe));
        epe.data.fd = fd;
        epe.events = EPOLLIN;
        if (write_cb)
                epe.events |= EPOLLOUT;
#endif

        /* We should only have one callback monitoring the fd. */
        if (fd <= eloop->events_maxfd) {
                if ((e = eloop->event_fds[fd]) != NULL) {
                        int error;

#if defined(HAVE_KQUEUE)
                        EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
                            0, 0, UPTR(e));
                        if (write_cb)
                                EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
                                    EV_ADD, 0, 0, UPTR(e));
                        else if (e->write_cb)
                                EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
                                    EV_DELETE, 0, 0, UPTR(e));
                        error = kevent(eloop->poll_fd, ke,
                            e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
                        epe.data.ptr = e;
                        error = epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD,
                            fd, &epe);
#else
                        error = 0;
#endif
                        if (read_cb) {
                                e->read_cb = read_cb;
                                e->read_cb_arg = read_cb_arg;
                        }
                        if (write_cb) {
                                e->write_cb = write_cb;
                                e->write_cb_arg = write_cb_arg;
                        }
                        eloop_event_setup_fds(eloop);
                        return error;
                }
        } else {
                struct eloop_event **new_fds;
                int maxfd, i;

                /* Reserve ourself and 4 more. */
                maxfd = fd + 4;
                new_fds = eloop_realloca(eloop->event_fds,
                    ((size_t)maxfd + 1), sizeof(*eloop->event_fds));
                if (new_fds == NULL)
                        return -1;

                /* Set new entries NULL as the fds may not be contiguous. */
                for (i = maxfd; i > eloop->events_maxfd; i--)
                        new_fds[i] = NULL;

                eloop->event_fds = new_fds;
                eloop->events_maxfd = maxfd;
        }

        /* Allocate a new event if no free ones already allocated. */
        if ((e = TAILQ_FIRST(&eloop->free_events))) {
                TAILQ_REMOVE(&eloop->free_events, e, next);
        } else {
                e = malloc(sizeof(*e));
                if (e == NULL)
                        goto err;
        }

        /* Ensure we can actually listen to it. */
        eloop->events_len++;
#ifdef HAVE_POLL
        if (eloop->events_len > eloop->fds_len) {
                nfds = eloop_realloca(eloop->fds,
                    (eloop->fds_len + 5), sizeof(*eloop->fds));
                if (nfds == NULL)
                        goto err;
                eloop->fds_len += 5;
                eloop->fds = nfds;
        }
#endif

        /* Now populate the structure and add it to the list. */
        e->fd = fd;
        e->read_cb = read_cb;
        e->read_cb_arg = read_cb_arg;
        e->write_cb = write_cb;
        e->write_cb_arg = write_cb_arg;

#if defined(HAVE_KQUEUE)
        /* Pack the kevents contiguously so a write-only registration
         * cannot pass an uninitialised ke[0] to kevent. */
        size_t nke = 0;

        if (read_cb != NULL)
                EV_SET(&ke[nke++], (uintptr_t)fd, EVFILT_READ,
                    EV_ADD, 0, 0, UPTR(e));
        if (write_cb != NULL)
                EV_SET(&ke[nke++], (uintptr_t)fd, EVFILT_WRITE,
                    EV_ADD, 0, 0, UPTR(e));
        if (kevent(eloop->poll_fd, ke, LENC(nke), NULL, 0, NULL) == -1)
                goto err;
#elif defined(HAVE_EPOLL)
        epe.data.ptr = e;
        if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
                goto err;
#endif

        TAILQ_INSERT_HEAD(&eloop->events, e, next);
        eloop->event_fds[e->fd] = e;
        eloop_event_setup_fds(eloop);
        return 0;

err:
        if (e) {
                eloop->events_len--;
                TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
        }
        return -1;
}

int
eloop_event_add(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg)
{

        return eloop_event_add_rw(eloop, fd, read_cb, read_cb_arg, NULL, NULL);
}
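/*
 * Typical usage sketch (handle_input and my_ctx are hypothetical,
 * not part of this file):
 *
 *      static void handle_input(void *arg)
 *      {
 *              struct my_ctx *ctx = arg;
 *
 *              ... ctx->fd is readable and will not block ...
 *      }
 *
 *      if (eloop_event_add(eloop, ctx->fd, handle_input, ctx) == -1)
 *              logerr("eloop_event_add");
 */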

int
eloop_event_add_w(struct eloop *eloop, int fd,
    void (*write_cb)(void *), void *write_cb_arg)
{

        return eloop_event_add_rw(eloop, fd, NULL, NULL,
            write_cb, write_cb_arg);
}

int
eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
{
        struct eloop_event *e;
#if defined(HAVE_KQUEUE)
        struct kevent ke[2];
#elif defined(HAVE_EPOLL)
        struct epoll_event epe;
#endif

        assert(eloop != NULL);

        if (fd > eloop->events_maxfd ||
            (e = eloop->event_fds[fd]) == NULL)
        {
                errno = ENOENT;
                return -1;
        }

        if (write_only) {
                if (e->write_cb == NULL)
                        return 0;
                if (e->read_cb == NULL)
                        goto remove;
                e->write_cb = NULL;
                e->write_cb_arg = NULL;
#if defined(HAVE_KQUEUE)
                EV_SET(&ke[0], (uintptr_t)e->fd,
                    EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
                kevent(eloop->poll_fd, ke, 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
                memset(&epe, 0, sizeof(epe));
                epe.data.fd = e->fd;
                epe.data.ptr = e;
                epe.events = EPOLLIN;
                epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD, fd, &epe);
#endif
                eloop_event_setup_fds(eloop);
                return 1;
        }

remove:
        TAILQ_REMOVE(&eloop->events, e, next);
        eloop->event_fds[e->fd] = NULL;
        TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
        eloop->events_len--;

#if defined(HAVE_KQUEUE)
        EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
            EV_DELETE, 0, 0, UPTR(NULL));
        if (e->write_cb)
                EV_SET(&ke[1], (uintptr_t)fd,
                    EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
        kevent(eloop->poll_fd, ke, e->write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
        /* A NULL event is safe here because we rely on epoll_pwait,
         * which was added after the kernel bug requiring a non-NULL
         * event for EPOLL_CTL_DEL was fixed. */
        epoll_ctl(eloop->poll_fd, EPOLL_CTL_DEL, fd, NULL);
#endif

        eloop_event_setup_fds(eloop);
        return 1;
}
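/*
 * Return values, for reference: -1 with errno ENOENT when the fd is
 * not monitored, 0 when asked to drop a write callback that was never
 * set, and 1 once the event has been modified or fully removed.
 */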

/*
 * This implementation should cope with UINT_MAX seconds on a system
 * where time_t is INT32_MAX. It should also cope with the monotonic timer
 * wrapping, although this is highly unlikely.
 * unsigned int should match or be greater than any on wire specified timeout.
 */
static int
eloop_q_timeout_add(struct eloop *eloop, int queue,
    unsigned int seconds, unsigned int nseconds,
    void (*callback)(void *), void *arg)
{
        struct eloop_timeout *t, *tt = NULL;

        assert(eloop != NULL);
        assert(callback != NULL);
        assert(nseconds <= NSEC_PER_SEC);

        /* Remove existing timeout if present. */
        TAILQ_FOREACH(t, &eloop->timeouts, next) {
                if (t->callback == callback && t->arg == arg) {
                        TAILQ_REMOVE(&eloop->timeouts, t, next);
                        break;
                }
        }

        if (t == NULL) {
                /* No existing, so allocate or grab one from the free pool. */
                if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
                        TAILQ_REMOVE(&eloop->free_timeouts, t, next);
                } else {
                        if ((t = malloc(sizeof(*t))) == NULL)
                                return -1;
                }
        }

        eloop_reduce_timers(eloop);

        t->seconds = seconds;
        t->nseconds = nseconds;
        t->callback = callback;
        t->arg = arg;
        t->queue = queue;

        /* The timeout list should be in chronological order,
         * soonest first. */
        TAILQ_FOREACH(tt, &eloop->timeouts, next) {
                if (t->seconds < tt->seconds ||
                    (t->seconds == tt->seconds && t->nseconds < tt->nseconds))
                {
                        TAILQ_INSERT_BEFORE(tt, t, next);
                        return 0;
                }
        }
        TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
        return 0;
}
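/*
 * Design note: keeping the queue sorted makes insertion O(n), but it
 * lets the main loop read the next deadline from TAILQ_FIRST() in
 * O(1), which is the hot path.
 */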

int
eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
    const struct timespec *when, void (*callback)(void *), void *arg)
{

        if (when->tv_sec < 0 || (unsigned long)when->tv_sec > UINT_MAX) {
                errno = EINVAL;
                return -1;
        }
        if (when->tv_nsec < 0 || when->tv_nsec > NSEC_PER_SEC) {
                errno = EINVAL;
                return -1;
        }

        return eloop_q_timeout_add(eloop, queue,
            (unsigned int)when->tv_sec, (unsigned int)when->tv_nsec,
            callback, arg);
}

int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, unsigned int seconds,
    void (*callback)(void *), void *arg)
{

        return eloop_q_timeout_add(eloop, queue, seconds, 0, callback, arg);
}

int
eloop_q_timeout_add_msec(struct eloop *eloop, int queue, unsigned long when,
    void (*callback)(void *), void *arg)
{
        unsigned long seconds, nseconds;

        seconds = when / MSEC_PER_SEC;
        if (seconds > UINT_MAX) {
                errno = EINVAL;
                return -1;
        }

        nseconds = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
        return eloop_q_timeout_add(eloop, queue,
                (unsigned int)seconds, (unsigned int)nseconds, callback, arg);
}
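/* Example: when = 1500 splits into seconds = 1, nseconds = 500000000. */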

#if !defined(HAVE_KQUEUE)
static int
eloop_timeout_add_now(struct eloop *eloop,
    void (*callback)(void *), void *arg)
{

        assert(eloop->timeout0 == NULL);
        eloop->timeout0 = callback;
        eloop->timeout0_arg = arg;
        return 0;
}
#endif

int
eloop_q_timeout_delete(struct eloop *eloop, int queue,
    void (*callback)(void *), void *arg)
{
        struct eloop_timeout *t, *tt;
        int n;

        assert(eloop != NULL);

        n = 0;
        TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
                if ((queue == 0 || t->queue == queue) &&
                    t->arg == arg &&
                    (!callback || t->callback == callback))
                {
                        TAILQ_REMOVE(&eloop->timeouts, t, next);
                        TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
                        n++;
                }
        }
        return n;
}

void
eloop_exit(struct eloop *eloop, int code)
{

        assert(eloop != NULL);

        eloop->exitcode = code;
        eloop->exitnow = 1;
}

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
static int
eloop_open(struct eloop *eloop)
{

#if defined(HAVE_KQUEUE1)
        return (eloop->poll_fd = kqueue1(O_CLOEXEC));
#elif defined(HAVE_KQUEUE)
        int i;

        if ((eloop->poll_fd = kqueue()) == -1)
                return -1;
        if ((i = fcntl(eloop->poll_fd, F_GETFD, 0)) == -1 ||
            fcntl(eloop->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
        {
                close(eloop->poll_fd);
                eloop->poll_fd = -1;
        }

        return eloop->poll_fd;
#elif defined(HAVE_EPOLL)
        return (eloop->poll_fd = epoll_create1(EPOLL_CLOEXEC));
#else
        return (eloop->poll_fd = -1);
#endif
}
#endif

int
eloop_requeue(struct eloop *eloop)
{
#if defined(HAVE_POLL)

        UNUSED(eloop);
        return 0;
#else /* !HAVE_POLL */
        struct eloop_event *e;
        int error;
#if defined(HAVE_KQUEUE)
        size_t i;
        struct kevent *ke;
#elif defined(HAVE_EPOLL)
        struct epoll_event epe;
#endif

        assert(eloop != NULL);

        if (eloop->poll_fd != -1)
                close(eloop->poll_fd);
        if (eloop_open(eloop) == -1)
                return -1;
#if defined(HAVE_KQUEUE)
        i = eloop->signals_len;
        TAILQ_FOREACH(e, &eloop->events, next) {
                i++;
                if (e->write_cb)
                        i++;
        }

        if ((ke = malloc(sizeof(*ke) * i)) == NULL)
                return -1;

        for (i = 0; i < eloop->signals_len; i++)
                EV_SET(&ke[i], (uintptr_t)eloop->signals[i],
                    EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));

        TAILQ_FOREACH(e, &eloop->events, next) {
                EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
                    EV_ADD, 0, 0, UPTR(e));
                i++;
                if (e->write_cb) {
                        EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
                            EV_ADD, 0, 0, UPTR(e));
                        i++;
                }
        }

        error = kevent(eloop->poll_fd, ke, LENC(i), NULL, 0, NULL);
        free(ke);

#elif defined(HAVE_EPOLL)

        error = 0;
        TAILQ_FOREACH(e, &eloop->events, next) {
                memset(&epe, 0, sizeof(epe));
                epe.data.fd = e->fd;
                epe.events = EPOLLIN;
                if (e->write_cb)
                        epe.events |= EPOLLOUT;
                epe.data.ptr = e;
                if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
                        error = -1;
        }
#endif

        return error;
#endif /* HAVE_POLL */
}
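/*
 * Why requeue exists (our reading of the code, not an upstream claim):
 * kqueue(2) and epoll(7) keep their interest lists in the kernel, so
 * after a fork(2) or a change to the signal set the descriptor must be
 * reopened and repopulated from the user-space lists eloop still holds.
 * The poll(2) build keeps no kernel state, hence the no-op above.
 */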

int
eloop_signal_set_cb(struct eloop *eloop,
    const int *signals, size_t signals_len,
    void (*signal_cb)(int, void *), void *signal_cb_ctx)
{

        assert(eloop != NULL);

        eloop->signals = signals;
        eloop->signals_len = signals_len;
        eloop->signal_cb = signal_cb;
        eloop->signal_cb_ctx = signal_cb_ctx;
        return eloop_requeue(eloop);
}

#ifndef HAVE_KQUEUE
struct eloop_siginfo {
        int sig;
        struct eloop *eloop;
};
static struct eloop_siginfo _eloop_siginfo;
static struct eloop *_eloop;

static void
eloop_signal1(void *arg)
{
        struct eloop_siginfo *si = arg;

        si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
}

static void
eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
{

        /* So that we can operate safely under a signal we instruct
         * eloop to pass a copy of the siginfo structure to eloop_signal1
         * as the very first thing to do. */
        _eloop_siginfo.eloop = _eloop;
        _eloop_siginfo.sig = sig;
        eloop_timeout_add_now(_eloop_siginfo.eloop,
            eloop_signal1, &_eloop_siginfo);
}
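/*
 * The above is the classic "do almost nothing in the handler" pattern:
 * eloop_signal3() only records which signal fired and schedules
 * timeout0, so the user's signal_cb runs later from the main loop,
 * where it is safe to allocate, lock and log. With kqueue this
 * indirection is unnecessary as EVFILT_SIGNAL delivers signals as
 * ordinary events.
 */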
#endif

int
eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
{
        sigset_t newset;
        size_t i;
#ifndef HAVE_KQUEUE
        struct sigaction sa;
#endif

        assert(eloop != NULL);

        sigemptyset(&newset);
        for (i = 0; i < eloop->signals_len; i++)
                sigaddset(&newset, eloop->signals[i]);
        if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
                return -1;

#ifndef HAVE_KQUEUE
        _eloop = eloop;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = eloop_signal3;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);

        for (i = 0; i < eloop->signals_len; i++) {
                if (sigaction(eloop->signals[i], &sa, NULL) == -1)
                        return -1;
        }
#endif
        return 0;
}

struct eloop *
eloop_new(void)
{
        struct eloop *eloop;

        eloop = calloc(1, sizeof(*eloop));
        if (eloop == NULL)
                return NULL;

        /* Check we have a working monotonic clock. */
        if (clock_gettime(CLOCK_MONOTONIC, &eloop->now) == -1) {
                free(eloop);
                return NULL;
        }

        TAILQ_INIT(&eloop->events);
        eloop->events_maxfd = -1;
        TAILQ_INIT(&eloop->free_events);
        TAILQ_INIT(&eloop->timeouts);
        TAILQ_INIT(&eloop->free_timeouts);
        eloop->exitcode = EXIT_FAILURE;

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
        if (eloop_open(eloop) == -1) {
                eloop_free(eloop);
                return NULL;
        }
#endif

        return eloop;
}
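/*
 * Minimal usage sketch (hello() and the exit value are hypothetical):
 *
 *      static void hello(void *arg) { eloop_exit(arg, EXIT_SUCCESS); }
 *
 *      struct eloop *e = eloop_new();
 *      eloop_q_timeout_add_sec(e, 0, 1, hello, e);
 *      int status = eloop_start(e, NULL);
 *      eloop_free(e);
 */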

void
eloop_clear(struct eloop *eloop)
{
        struct eloop_event *e;
        struct eloop_timeout *t;

        if (eloop == NULL)
                return;

        free(eloop->event_fds);
        eloop->event_fds = NULL;
        eloop->events_len = 0;
        eloop->events_maxfd = -1;
        eloop->signals = NULL;
        eloop->signals_len = 0;

        while ((e = TAILQ_FIRST(&eloop->events))) {
                TAILQ_REMOVE(&eloop->events, e, next);
                free(e);
        }
        while ((e = TAILQ_FIRST(&eloop->free_events))) {
                TAILQ_REMOVE(&eloop->free_events, e, next);
                free(e);
        }
        while ((t = TAILQ_FIRST(&eloop->timeouts))) {
                TAILQ_REMOVE(&eloop->timeouts, t, next);
                free(t);
        }
        while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
                TAILQ_REMOVE(&eloop->free_timeouts, t, next);
                free(t);
        }

#if defined(HAVE_POLL)
        free(eloop->fds);
        eloop->fds = NULL;
        eloop->fds_len = 0;
#endif
}

void
eloop_free(struct eloop *eloop)
{

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
        if (eloop != NULL)
                close(eloop->poll_fd);
#endif
        eloop_clear(eloop);
        free(eloop);
}

int
eloop_start(struct eloop *eloop, sigset_t *signals)
{
        int n;
        struct eloop_event *e;
        struct eloop_timeout *t;
        void (*t0)(void *);
#if defined(HAVE_KQUEUE)
        struct kevent ke;
        UNUSED(signals);
#elif defined(HAVE_EPOLL)
        struct epoll_event epe;
#endif
#if defined(HAVE_KQUEUE) || defined(HAVE_POLL)
        struct timespec ts, *tsp;
#endif
#ifndef HAVE_KQUEUE
        int timeout;
#endif

        assert(eloop != NULL);

        eloop->exitnow = 0;
        for (;;) {
                if (eloop->exitnow)
                        break;

                /* Run all timeouts first. */
                if (eloop->timeout0) {
                        t0 = eloop->timeout0;
                        eloop->timeout0 = NULL;
                        t0(eloop->timeout0_arg);
                        continue;
                }

                t = TAILQ_FIRST(&eloop->timeouts);
                if (t == NULL && eloop->events_len == 0)
                        break;

                if (t != NULL)
                        eloop_reduce_timers(eloop);

                if (t != NULL && t->seconds == 0 && t->nseconds == 0) {
                        TAILQ_REMOVE(&eloop->timeouts, t, next);
                        t->callback(t->arg);
                        TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
                        continue;
                }

                if (t != NULL) {
#if defined(HAVE_KQUEUE) || defined(HAVE_POLL)
                        if (t->seconds > INT_MAX) {
                                ts.tv_sec = (time_t)INT_MAX;
                                ts.tv_nsec = 0;
                        } else {
                                ts.tv_sec = (time_t)t->seconds;
                                ts.tv_nsec = (long)t->nseconds;
                        }
                        tsp = &ts;
#endif

#ifndef HAVE_KQUEUE
                        /* Convert to milliseconds, rounding up and
                         * clamping at INT_MAX; the millisecond part
                         * may be at most 1000, so the remainder to
                         * compare against is INT_MAX % 1000. */
                        if (t->seconds > INT_MAX / 1000 ||
                            (t->seconds == INT_MAX / 1000 &&
                            ((t->nseconds + 999999) / 1000000
                            > INT_MAX % 1000)))
                                timeout = INT_MAX;
                        else
                                timeout = (int)(t->seconds * 1000 +
                                    (t->nseconds + 999999) / 1000000);
#endif
                } else {
#if defined(HAVE_KQUEUE) || defined(HAVE_POLL)
                        tsp = NULL;
#endif
#ifndef HAVE_KQUEUE
                        timeout = -1;
#endif
                }

#if defined(HAVE_KQUEUE)
                n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
#elif defined(HAVE_EPOLL)
                if (signals)
                        n = epoll_pwait(eloop->poll_fd, &epe, 1,
                            timeout, signals);
                else
                        n = epoll_wait(eloop->poll_fd, &epe, 1, timeout);
#elif defined(HAVE_POLL)
                if (signals)
                        n = POLLTS(eloop->fds, (nfds_t)eloop->events_len,
                            tsp, signals);
                else
                        n = poll(eloop->fds, (nfds_t)eloop->events_len,
                            timeout);
#endif
                if (n == -1) {
                        if (errno == EINTR)
                                continue;
                        return -errno;
                }
                if (n == 0)
                        continue;

                /* Process any triggered events.
                 * We go back to the start after calling each callback
                 * in case the current event or next event is removed. */
#if defined(HAVE_KQUEUE)
                if (ke.filter == EVFILT_SIGNAL) {
                        eloop->signal_cb((int)ke.ident,
                            eloop->signal_cb_ctx);
                } else {
                        e = (struct eloop_event *)ke.udata;
                        if (ke.filter == EVFILT_WRITE && e->write_cb != NULL)
                                e->write_cb(e->write_cb_arg);
                        else if (ke.filter == EVFILT_READ && e->read_cb != NULL)
                                e->read_cb(e->read_cb_arg);
                }
#elif defined(HAVE_EPOLL)
                e = (struct eloop_event *)epe.data.ptr;
                if (epe.events & EPOLLOUT && e->write_cb != NULL)
                        e->write_cb(e->write_cb_arg);
                else if (epe.events & (EPOLLIN | EPOLLERR | EPOLLHUP) &&
                    e->read_cb != NULL)
                        e->read_cb(e->read_cb_arg);
#elif defined(HAVE_POLL)
                size_t i;

                for (i = 0; i < eloop->events_len; i++) {
                        if (eloop->fds[i].revents & POLLOUT) {
                                e = eloop->event_fds[eloop->fds[i].fd];
                                if (e->write_cb != NULL) {
                                        e->write_cb(e->write_cb_arg);
                                        break;
                                }
                        }
                        if (eloop->fds[i].revents) {
                                e = eloop->event_fds[eloop->fds[i].fd];
                                if (e->read_cb != NULL) {
                                        e->read_cb(e->read_cb_arg);
                                        break;
                                }
                        }
                }
#endif
        }

        return eloop->exitcode;
}