Import dhcpcd-8.0.4 to vendor branch.
[dragonfly.git] contrib/dhcpcd/src/eloop.c
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * eloop - portable event based main loop.
 * Copyright (c) 2006-2019 Roy Marples <roy@marples.name>
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if (defined(__unix__) || defined(unix)) && !defined(USG)
#include <sys/param.h>
#endif
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* config.h should define HAVE_KQUEUE, HAVE_EPOLL, etc. */
#if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H)
#include "config.h"
#endif

/* Attempt to autodetect kqueue or epoll.
 * Failing that, fall back to pselect. */
#if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL) && !defined(HAVE_PSELECT) && \
    !defined(HAVE_POLLTS) && !defined(HAVE_PPOLL)
#if defined(BSD)
/* Assume BSD has a working sys/queue.h and kqueue(2) interface. */
#define HAVE_SYS_QUEUE_H
#define HAVE_KQUEUE
#define WARN_SELECT
#elif defined(__linux__) || defined(__sun)
/* Assume Linux and Solaris have a working epoll(7) interface. */
#define HAVE_EPOLL
#define WARN_SELECT
#else
/* pselect(2) is a POSIX standard. */
#define HAVE_PSELECT
#define WARN_SELECT
#endif
#endif

/* pollts and ppoll require poll.
 * pselect is wrapped in a pollts/ppoll style interface
 * and as such requires poll as well. */
#if defined(HAVE_PSELECT) || defined(HAVE_POLLTS) || defined(HAVE_PPOLL)
#ifndef HAVE_POLL
#define HAVE_POLL
#endif
#if defined(HAVE_POLLTS)
#define POLLTS pollts
#elif defined(HAVE_PPOLL)
#define POLLTS ppoll
#else
#define POLLTS eloop_pollts
#define ELOOP_NEED_POLLTS
#endif
#endif

#include "eloop.h"

#ifndef UNUSED
#define UNUSED(a) (void)((a))
#endif
#ifndef __unused
#ifdef __GNUC__
#define __unused   __attribute__((__unused__))
#else
#define __unused
#endif
#endif

#ifndef MSEC_PER_SEC
#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L
#endif

#if defined(HAVE_KQUEUE)
#include <sys/event.h>
#include <fcntl.h>
#ifdef __NetBSD__
/* udata is void * except on NetBSD.
 * lengths are int except on NetBSD. */
#define UPTR(x) ((intptr_t)(x))
#define LENC(x) (x)
#else
#define UPTR(x) (x)
#define LENC(x) ((int)(x))
#endif
#elif defined(HAVE_EPOLL)
#include <sys/epoll.h>
#elif defined(HAVE_POLL)
#if defined(HAVE_PSELECT)
#include <sys/select.h>
#endif
#include <poll.h>
#endif

#ifdef WARN_SELECT
#if defined(HAVE_KQUEUE)
#pragma message("Compiling eloop with kqueue(2) support.")
#elif defined(HAVE_EPOLL)
#pragma message("Compiling eloop with epoll(7) support.")
#elif defined(HAVE_PSELECT)
#pragma message("Compiling eloop with pselect(2) support.")
#elif defined(HAVE_PPOLL)
#pragma message("Compiling eloop with ppoll(2) support.")
#elif defined(HAVE_POLLTS)
#pragma message("Compiling eloop with pollts(2) support.")
#else
#error Unknown select mechanism for eloop
#endif
#endif

/* Our structures require TAILQ macros, which really every libc should
 * ship as they are useful beyond belief.
 * Sadly some libcs don't have sys/queue.h and some that do don't have
 * the TAILQ_FOREACH macro. For those that don't, the application using
 * this implementation will need to ship a working queue.h somewhere.
 * If config.h does not report a usable sys/queue.h, allow QUEUE_H to
 * name a replacement header, else fall back to queue.h in the current
 * directory. */
#ifndef TAILQ_FOREACH
#ifdef HAVE_SYS_QUEUE_H
#include <sys/queue.h>
#elif defined(QUEUE_H)
#define __QUEUE_HEADER(x) #x
#define _QUEUE_HEADER(x) __QUEUE_HEADER(x)
#include _QUEUE_HEADER(QUEUE_H)
#else
#include "queue.h"
#endif
#endif

struct eloop_event {
	TAILQ_ENTRY(eloop_event) next;
	int fd;
	void (*read_cb)(void *);
	void *read_cb_arg;
	void (*write_cb)(void *);
	void *write_cb_arg;
};

struct eloop_timeout {
	TAILQ_ENTRY(eloop_timeout) next;
	struct timespec when;
	void (*callback)(void *);
	void *arg;
	int queue;
};

struct eloop {
	size_t events_len;
	TAILQ_HEAD (event_head, eloop_event) events;
	struct event_head free_events;
	int events_maxfd;
	struct eloop_event **event_fds;

	TAILQ_HEAD (timeout_head, eloop_timeout) timeouts;
	struct timeout_head free_timeouts;

	void (*timeout0)(void *);
	void *timeout0_arg;
	const int *signals;
	size_t signals_len;
	void (*signal_cb)(int, void *);
	void *signal_cb_ctx;

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	int poll_fd;
#elif defined(HAVE_POLL)
	struct pollfd *fds;
	size_t fds_len;
#endif

	int exitnow;
	int exitcode;
};

#ifdef HAVE_REALLOCARRAY
#define eloop_realloca	reallocarray
#else
/* Handy routine to check for potential overflow.
 * reallocarray(3) and reallocarr(3) are not portable. */
#define SQRT_SIZE_MAX (((size_t)1) << (sizeof(size_t) * CHAR_BIT / 2))
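/* If both n and size are below SQRT_SIZE_MAX then n * size cannot
 * overflow, so the division below only runs in the unusual case where
 * at least one operand is large. */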
static void *
eloop_realloca(void *ptr, size_t n, size_t size)
{

	if ((n | size) >= SQRT_SIZE_MAX && n > SIZE_MAX / size) {
		errno = EOVERFLOW;
		return NULL;
	}
	return realloc(ptr, n * size);
}
#endif

#ifdef HAVE_POLL
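/* Synchronise the pollfd array with the list of registered events.
 * Called whenever an event is added, changed or removed. */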
static void
eloop_event_setup_fds(struct eloop *eloop)
{
	struct eloop_event *e;
	size_t i;

	i = 0;
	TAILQ_FOREACH(e, &eloop->events, next) {
		eloop->fds[i].fd = e->fd;
		eloop->fds[i].events = 0;
		if (e->read_cb)
			eloop->fds[i].events |= POLLIN;
		if (e->write_cb)
			eloop->fds[i].events |= POLLOUT;
		eloop->fds[i].revents = 0;
		i++;
	}
}

#ifdef ELOOP_NEED_POLLTS
/* Wrapper around pselect, to imitate the NetBSD pollts call. */
static int
eloop_pollts(struct pollfd *fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
{
	fd_set read_fds, write_fds;
	nfds_t n;
	int maxfd, r;

	FD_ZERO(&read_fds);
	FD_ZERO(&write_fds);
	maxfd = 0;
	for (n = 0; n < nfds; n++) {
		if (fds[n].events & POLLIN) {
			FD_SET(fds[n].fd, &read_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
		if (fds[n].events & POLLOUT) {
			FD_SET(fds[n].fd, &write_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
	}

	r = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask);
	if (r > 0) {
		for (n = 0; n < nfds; n++) {
			fds[n].revents =
			    FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
			if (FD_ISSET(fds[n].fd, &write_fds))
				fds[n].revents |= POLLOUT;
		}
	}

	return r;
}
#endif /* pollts */
#else /* !HAVE_POLL */
#define eloop_event_setup_fds(a) {}
#endif /* HAVE_POLL */

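/* Register read and/or write callbacks for an fd. If the fd is already
 * being monitored, the given callbacks replace the existing ones. */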
int
eloop_event_add_rw(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg,
    void (*write_cb)(void *), void *write_cb_arg)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
	int nke;
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#elif defined(HAVE_POLL)
	struct pollfd *nfds;
#endif

	assert(eloop != NULL);
	assert(read_cb != NULL || write_cb != NULL);
	if (fd == -1) {
		errno = EINVAL;
		return -1;
	}

#ifdef HAVE_EPOLL
	memset(&epe, 0, sizeof(epe));
	epe.data.fd = fd;
	epe.events = EPOLLIN;
	if (write_cb)
		epe.events |= EPOLLOUT;
#endif

	/* We should only have one callback monitoring the fd. */
	if (fd <= eloop->events_maxfd) {
		if ((e = eloop->event_fds[fd]) != NULL) {
			int error;

#if defined(HAVE_KQUEUE)
			EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
			    0, 0, UPTR(e));
			if (write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_ADD, 0, 0, UPTR(e));
			else if (e->write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_DELETE, 0, 0, UPTR(e));
			error = kevent(eloop->poll_fd, ke,
			    e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
			epe.data.ptr = e;
			error = epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD,
			    fd, &epe);
#else
			error = 0;
#endif
			if (read_cb) {
				e->read_cb = read_cb;
				e->read_cb_arg = read_cb_arg;
			}
			if (write_cb) {
				e->write_cb = write_cb;
				e->write_cb_arg = write_cb_arg;
			}
			eloop_event_setup_fds(eloop);
			return error;
		}
	} else {
		struct eloop_event **new_fds;
		int maxfd, i;

		/* Reserve our own slot and 4 more. */
		maxfd = fd + 4;
		new_fds = eloop_realloca(eloop->event_fds,
		    ((size_t)maxfd + 1), sizeof(*eloop->event_fds));
		if (new_fds == NULL)
			return -1;

		/* Set new entries to NULL as the fds may not be contiguous. */
		for (i = maxfd; i > eloop->events_maxfd; i--)
			new_fds[i] = NULL;

		eloop->event_fds = new_fds;
		eloop->events_maxfd = maxfd;
	}

	/* Take an event from the free list, else allocate a new one. */
	if ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
	} else {
		e = malloc(sizeof(*e));
		if (e == NULL)
			goto err;
	}

	/* Ensure we can actually listen to it. */
	eloop->events_len++;
#ifdef HAVE_POLL
	if (eloop->events_len > eloop->fds_len) {
		nfds = eloop_realloca(eloop->fds,
		    (eloop->fds_len + 5), sizeof(*eloop->fds));
		if (nfds == NULL)
			goto err;
		eloop->fds_len += 5;
		eloop->fds = nfds;
	}
#endif

	/* Now populate the structure and add it to the list. */
	e->fd = fd;
	e->read_cb = read_cb;
	e->read_cb_arg = read_cb_arg;
	e->write_cb = write_cb;
	e->write_cb_arg = write_cb_arg;

#if defined(HAVE_KQUEUE)
	/* Only register the filters that have a callback so that an
	 * uninitialised kevent is never passed to the kernel. */
	nke = 0;
	if (read_cb != NULL)
		EV_SET(&ke[nke++], (uintptr_t)fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
	if (write_cb != NULL)
		EV_SET(&ke[nke++], (uintptr_t)fd, EVFILT_WRITE,
		    EV_ADD, 0, 0, UPTR(e));
	if (kevent(eloop->poll_fd, ke, nke, NULL, 0, NULL) == -1)
		goto err;
#elif defined(HAVE_EPOLL)
	epe.data.ptr = e;
	if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
		goto err;
#endif

	TAILQ_INSERT_HEAD(&eloop->events, e, next);
	eloop->event_fds[e->fd] = e;
	eloop_event_setup_fds(eloop);
	return 0;

err:
	if (e) {
		eloop->events_len--;
		TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
	}
	return -1;
}

int
eloop_event_add(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg)
{

	return eloop_event_add_rw(eloop, fd, read_cb, read_cb_arg, NULL, NULL);
}

int
eloop_event_add_w(struct eloop *eloop, int fd,
    void (*write_cb)(void *), void *write_cb_arg)
{

	return eloop_event_add_rw(eloop, fd, NULL, NULL,
	    write_cb, write_cb_arg);
}

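/* Stop monitoring an fd: just the write side if write_only is set and
 * a read callback remains, otherwise the whole event. Returns 1 if
 * something was removed, 0 if there was no write callback to remove,
 * or -1 if the fd is not monitored at all. */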
int
eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	if (fd > eloop->events_maxfd ||
	    (e = eloop->event_fds[fd]) == NULL)
	{
		errno = ENOENT;
		return -1;
	}

	if (write_only) {
		if (e->write_cb == NULL)
			return 0;
		if (e->read_cb == NULL)
			goto remove;
		e->write_cb = NULL;
		e->write_cb_arg = NULL;
#if defined(HAVE_KQUEUE)
		EV_SET(&ke[0], (uintptr_t)e->fd,
		    EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
		kevent(eloop->poll_fd, ke, 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
		memset(&epe, 0, sizeof(epe));
		epe.data.fd = e->fd;
		epe.data.ptr = e;
		epe.events = EPOLLIN;
		epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD, fd, &epe);
#endif
		eloop_event_setup_fds(eloop);
		return 1;
	}

remove:
	TAILQ_REMOVE(&eloop->events, e, next);
	eloop->event_fds[e->fd] = NULL;
	TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
	eloop->events_len--;

#if defined(HAVE_KQUEUE)
	EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
	    EV_DELETE, 0, 0, UPTR(NULL));
	if (e->write_cb)
		EV_SET(&ke[1], (uintptr_t)fd,
		    EVFILT_WRITE, EV_DELETE, 0, 0, UPTR(NULL));
	kevent(eloop->poll_fd, ke, e->write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
	/* A NULL event is safe here because we rely on epoll_pwait(2),
	 * which was added to Linux after the kernel bug requiring a
	 * non-NULL event for EPOLL_CTL_DEL was fixed. */
	epoll_ctl(eloop->poll_fd, EPOLL_CTL_DEL, fd, NULL);
#endif

	eloop_event_setup_fds(eloop);
	return 1;
}

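/* Schedule a timeout relative to now on the given queue. Any existing
 * timeout with the same callback and argument is rescheduled rather
 * than duplicated. */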
int
eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
    const struct timespec *when, void (*callback)(void *), void *arg)
{
	struct timespec now, w;
	struct eloop_timeout *t, *tt = NULL;

	assert(eloop != NULL);
	assert(when != NULL);
	assert(callback != NULL);

	clock_gettime(CLOCK_MONOTONIC, &now);
	timespecadd(&now, when, &w);
	/* Check for time_t overflow. */
	if (timespeccmp(&w, &now, <)) {
		errno = ERANGE;
		return -1;
	}

	/* Remove existing timeout if present. */
	TAILQ_FOREACH(t, &eloop->timeouts, next) {
		if (t->callback == callback && t->arg == arg) {
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			break;
		}
	}

	if (t == NULL) {
		/* No existing timeout, so take one from the free pool
		 * or allocate a new one. */
		if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
			TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		} else {
			if ((t = malloc(sizeof(*t))) == NULL)
				return -1;
		}
	}

	t->when = w;
	t->callback = callback;
	t->arg = arg;
	t->queue = queue;

	/* The timeout list should be in chronological order,
	 * soonest first. */
	TAILQ_FOREACH(tt, &eloop->timeouts, next) {
		if (timespeccmp(&t->when, &tt->when, <)) {
			TAILQ_INSERT_BEFORE(tt, t, next);
			return 0;
		}
	}
	TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
	return 0;
}

int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, time_t when,
    void (*callback)(void *), void *arg)
{
	struct timespec tv;

	tv.tv_sec = when;
	tv.tv_nsec = 0;
	return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
}

int
eloop_q_timeout_add_msec(struct eloop *eloop, int queue, long when,
    void (*callback)(void *), void *arg)
{
	struct timespec tv;

	tv.tv_sec = when / MSEC_PER_SEC;
	tv.tv_nsec = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
	return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
}

#if !defined(HAVE_KQUEUE)
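/* Run a callback on the very next pass of the loop. Only one such
 * callback may be pending at a time; used by the signal trampoline
 * below. */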
static int
eloop_timeout_add_now(struct eloop *eloop,
    void (*callback)(void *), void *arg)
{

	assert(eloop->timeout0 == NULL);
	eloop->timeout0 = callback;
	eloop->timeout0_arg = arg;
	return 0;
}
#endif

int
eloop_q_timeout_delete(struct eloop *eloop, int queue,
    void (*callback)(void *), void *arg)
{
	struct eloop_timeout *t, *tt;
	int n;

	assert(eloop != NULL);

	n = 0;
	TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
		if ((queue == 0 || t->queue == queue) &&
		    t->arg == arg &&
		    (!callback || t->callback == callback))
		{
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
			n++;
		}
	}
	return n;
}

void
eloop_exit(struct eloop *eloop, int code)
{

	assert(eloop != NULL);

	eloop->exitcode = code;
	eloop->exitnow = 1;
}

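/* Open the kernel event queue, marking it close-on-exec. */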
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
static int
eloop_open(struct eloop *eloop)
{

#if defined(HAVE_KQUEUE1)
	return (eloop->poll_fd = kqueue1(O_CLOEXEC));
#elif defined(HAVE_KQUEUE)
	int i;

	if ((eloop->poll_fd = kqueue()) == -1)
		return -1;
	if ((i = fcntl(eloop->poll_fd, F_GETFD, 0)) == -1 ||
	    fcntl(eloop->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
	{
		close(eloop->poll_fd);
		eloop->poll_fd = -1;
	}

	return eloop->poll_fd;
#elif defined(HAVE_EPOLL)
	return (eloop->poll_fd = epoll_create1(EPOLL_CLOEXEC));
#else
	return (eloop->poll_fd = -1);
#endif
}
#endif

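/* Close and reopen the kernel event queue, then register the current
 * signal and event lists with it again. A no-op for poll(2). */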
int
eloop_requeue(struct eloop *eloop)
{
#if defined(HAVE_POLL)

	UNUSED(eloop);
	return 0;
#else /* !HAVE_POLL */
	struct eloop_event *e;
	int error;
#if defined(HAVE_KQUEUE)
	size_t i;
	struct kevent *ke;
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	if (eloop->poll_fd != -1)
		close(eloop->poll_fd);
	if (eloop_open(eloop) == -1)
		return -1;
#if defined(HAVE_KQUEUE)
	i = eloop->signals_len;
	TAILQ_FOREACH(e, &eloop->events, next) {
		i++;
		if (e->write_cb)
			i++;
	}

	if ((ke = malloc(sizeof(*ke) * i)) == NULL)
		return -1;

	for (i = 0; i < eloop->signals_len; i++)
		EV_SET(&ke[i], (uintptr_t)eloop->signals[i],
		    EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));

	TAILQ_FOREACH(e, &eloop->events, next) {
		EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
		i++;
		if (e->write_cb) {
			EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
			    EV_ADD, 0, 0, UPTR(e));
			i++;
		}
	}

	error = kevent(eloop->poll_fd, ke, LENC(i), NULL, 0, NULL);
	free(ke);

#elif defined(HAVE_EPOLL)

	error = 0;
	TAILQ_FOREACH(e, &eloop->events, next) {
		memset(&epe, 0, sizeof(epe));
		epe.data.fd = e->fd;
		epe.events = EPOLLIN;
		if (e->write_cb)
			epe.events |= EPOLLOUT;
		epe.data.ptr = e;
		if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
			error = -1;
	}
#endif

	return error;
#endif /* HAVE_POLL */
}

int
eloop_signal_set_cb(struct eloop *eloop,
    const int *signals, size_t signals_len,
    void (*signal_cb)(int, void *), void *signal_cb_ctx)
{

	assert(eloop != NULL);

	eloop->signals = signals;
	eloop->signals_len = signals_len;
	eloop->signal_cb = signal_cb;
	eloop->signal_cb_ctx = signal_cb_ctx;
	return eloop_requeue(eloop);
}

#ifndef HAVE_KQUEUE
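/* Without kqueue's EVFILT_SIGNAL, signals are caught by a traditional
 * handler. The state lives in globals because a signal handler cannot
 * be passed a context pointer. */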
struct eloop_siginfo {
	int sig;
	struct eloop *eloop;
};
static struct eloop_siginfo _eloop_siginfo;
static struct eloop *_eloop;

static void
eloop_signal1(void *arg)
{
	struct eloop_siginfo *si = arg;

	si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
}

static void
eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
{

	/* So that we can operate safely under a signal we instruct
	 * eloop to pass a copy of the signal information to
	 * eloop_signal1 as the very first thing to do. */
	_eloop_siginfo.eloop = _eloop;
	_eloop_siginfo.sig = sig;
	eloop_timeout_add_now(_eloop_siginfo.eloop,
	    eloop_signal1, &_eloop_siginfo);
}
#endif

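/* Block the signals eloop handles, returning the old mask in oldset,
 * and (without kqueue) install the trampoline handler for each. */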
int
eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
{
	sigset_t newset;
	size_t i;
#ifndef HAVE_KQUEUE
	struct sigaction sa;
#endif

	assert(eloop != NULL);

	sigemptyset(&newset);
	for (i = 0; i < eloop->signals_len; i++)
		sigaddset(&newset, eloop->signals[i]);
	if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
		return -1;

#ifndef HAVE_KQUEUE
	_eloop = eloop;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = eloop_signal3;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);

	for (i = 0; i < eloop->signals_len; i++) {
		if (sigaction(eloop->signals[i], &sa, NULL) == -1)
			return -1;
	}
#endif
	return 0;
}

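/*
 * Typical usage, as a sketch (the handlers, fd, arg and sigmask here
 * are the caller's own; error handling omitted):
 *
 *	struct eloop *e = eloop_new();
 *	eloop_event_add(e, fd, read_handler, arg);
 *	eloop_q_timeout_add_sec(e, 0, 10, expire_handler, arg);
 *	code = eloop_start(e, &sigmask);
 *	eloop_free(e);
 */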
struct eloop *
eloop_new(void)
{
	struct eloop *eloop;
	struct timespec now;

	/* Check we have a working monotonic clock. */
	if (clock_gettime(CLOCK_MONOTONIC, &now) == -1)
		return NULL;

	eloop = calloc(1, sizeof(*eloop));
	if (eloop) {
		TAILQ_INIT(&eloop->events);
		eloop->events_maxfd = -1;
		TAILQ_INIT(&eloop->free_events);
		TAILQ_INIT(&eloop->timeouts);
		TAILQ_INIT(&eloop->free_timeouts);
		eloop->exitcode = EXIT_FAILURE;
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
		if (eloop_open(eloop) == -1) {
			eloop_free(eloop);
			return NULL;
		}
#endif
	}

	return eloop;
}

void
eloop_free(struct eloop *eloop)
{
	struct eloop_event *e;
	struct eloop_timeout *t;

	if (eloop == NULL)
		return;

	free(eloop->event_fds);
	while ((e = TAILQ_FIRST(&eloop->events))) {
		TAILQ_REMOVE(&eloop->events, e, next);
		free(e);
	}
	while ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
		free(e);
	}
	while ((t = TAILQ_FIRST(&eloop->timeouts))) {
		TAILQ_REMOVE(&eloop->timeouts, t, next);
		free(t);
	}
	while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
		TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		free(t);
	}
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	close(eloop->poll_fd);
#elif defined(HAVE_POLL)
	free(eloop->fds);
#endif
	free(eloop);
}

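/* Run the loop until eloop_exit() is called or there is nothing left
 * to wait for, returning the exit code, or -errno on a poll failure. */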
int
eloop_start(struct eloop *eloop, sigset_t *signals)
{
	int n;
	struct eloop_event *e;
	struct eloop_timeout *t;
	struct timespec now, ts, *tsp;
	void (*t0)(void *);
#if defined(HAVE_KQUEUE)
	struct kevent ke;
	UNUSED(signals);
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif
#ifndef HAVE_KQUEUE
	int timeout;
#endif

	assert(eloop != NULL);

	eloop->exitnow = 0;
	for (;;) {
		if (eloop->exitnow)
			break;

		/* Run all timeouts first. */
		if (eloop->timeout0) {
			t0 = eloop->timeout0;
			eloop->timeout0 = NULL;
			t0(eloop->timeout0_arg);
			continue;
		}
		if ((t = TAILQ_FIRST(&eloop->timeouts))) {
			clock_gettime(CLOCK_MONOTONIC, &now);
			if (timespeccmp(&now, &t->when, >)) {
				TAILQ_REMOVE(&eloop->timeouts, t, next);
				t->callback(t->arg);
				TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
				continue;
			}
			timespecsub(&t->when, &now, &ts);
			tsp = &ts;
		} else
			/* No timeouts, so wait forever. */
			tsp = NULL;

		if (tsp == NULL && eloop->events_len == 0)
			break;

#ifndef HAVE_KQUEUE
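		/* poll(2) and epoll(7) take their timeout in milliseconds,
		 * so convert the timespec, rounding up and clamping
		 * at INT_MAX. */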
		if (tsp == NULL)
			timeout = -1;
		else if (tsp->tv_sec > INT_MAX / 1000 ||
		    (tsp->tv_sec == INT_MAX / 1000 &&
		    (tsp->tv_nsec + 999999) / 1000000 > INT_MAX % 1000))
			timeout = INT_MAX;
		else
			timeout = (int)(tsp->tv_sec * 1000 +
			    (tsp->tv_nsec + 999999) / 1000000);
#endif

#if defined(HAVE_KQUEUE)
		n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
#elif defined(HAVE_EPOLL)
		if (signals)
			n = epoll_pwait(eloop->poll_fd, &epe, 1,
			    timeout, signals);
		else
			n = epoll_wait(eloop->poll_fd, &epe, 1, timeout);
#elif defined(HAVE_POLL)
		if (signals)
			n = POLLTS(eloop->fds, (nfds_t)eloop->events_len,
			    tsp, signals);
		else
			n = poll(eloop->fds, (nfds_t)eloop->events_len,
			    timeout);
#endif
		if (n == -1) {
			if (errno == EINTR)
				continue;
			return -errno;
		}

		/* Process any triggered events.
		 * We go back to the start after calling each callback in case
		 * the current event or the next event is removed. */
#if defined(HAVE_KQUEUE)
		if (n) {
			if (ke.filter == EVFILT_SIGNAL) {
				eloop->signal_cb((int)ke.ident,
				    eloop->signal_cb_ctx);
				continue;
			}
			e = (struct eloop_event *)ke.udata;
			if (ke.filter == EVFILT_WRITE) {
				e->write_cb(e->write_cb_arg);
				continue;
			} else if (ke.filter == EVFILT_READ) {
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#elif defined(HAVE_EPOLL)
		if (n) {
			e = (struct eloop_event *)epe.data.ptr;
			if (epe.events & EPOLLOUT && e->write_cb != NULL) {
				e->write_cb(e->write_cb_arg);
				continue;
			}
			if (epe.events &
			    (EPOLLIN | EPOLLERR | EPOLLHUP) &&
			    e->read_cb != NULL)
			{
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#elif defined(HAVE_POLL)
		if (n > 0) {
			size_t i;

			for (i = 0; i < eloop->events_len; i++) {
				if (eloop->fds[i].revents & POLLOUT) {
					e = eloop->event_fds[eloop->fds[i].fd];
					if (e->write_cb != NULL) {
						e->write_cb(e->write_cb_arg);
						break;
					}
				}
				if (eloop->fds[i].revents) {
					e = eloop->event_fds[eloop->fds[i].fd];
					if (e->read_cb != NULL) {
						e->read_cb(e->read_cb_arg);
						break;
					}
				}
			}
		}
#endif
	}

	return eloop->exitcode;
}