/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#include <sys/tree.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_RTSIG
extern const struct eventop rtsigops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif
/* In order of preference */
const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_RTSIG
	&rtsigops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);
static int
compare(struct event *a, struct event *b)
{
	if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
		return (-1);
	else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
		return (1);
	return (0);
}
static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec	ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
#endif
}
static int
gettime(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (gettimeofday(tp, NULL));
}
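/*
 * Illustrative note (not part of the original source): why gettime()
 * prefers CLOCK_MONOTONIC.  The wall clock may jump when the admin or
 * NTP steps the time; the monotonic clock cannot, so timeouts keyed to
 * it never need the repair that timeout_correct() performs below.
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);	// never moves backwards
 *	// gettimeofday() may move backwards after settimeofday()/NTP steps
 */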
RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);

RB_GENERATE(event_tree, event, ev_timeout_node, compare);
void *
event_init(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(&base->event_tv);

	RB_INIT(&base->timetree);
	TAILQ_INIT(&base->eventqueue);
	TAILQ_INIT(&base->sig.signalqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	current_base = base;
	return (base);
}
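/*
 * Usage sketch (illustrative; "my_read_cb" and "sock" are hypothetical):
 * a caller initializes the library once, then registers events against
 * the base created here.
 *
 *	struct event ev;
 *
 *	event_init();
 *	event_set(&ev, sock, EV_READ|EV_PERSIST, my_read_cb, NULL);
 *	event_add(&ev, NULL);
 *	event_dispatch();
 *
 * Setting EVENT_SHOW_METHOD in the environment makes initialization
 * print which backend (e.g. kqueue, epoll, select) was chosen.
 */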
void
event_base_free(struct event_base *base)
{
	int i;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	assert(base);
	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(RB_EMPTY(&base->timetree));

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)calloc(base->nactivequeues,
	    sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}
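/*
 * Usage sketch (illustrative): give a base three priority levels and
 * mark one event as most urgent.  This must be called while no events
 * are active, since the active queues are reallocated.
 *
 *	event_base_priority_init(base, 3);
 *	event_set(&ev, fd, EV_READ, cb, NULL);
 *	event_base_set(base, &ev);
 *	event_priority_set(&ev, 0);	// 0 = highest, npriorities-1 = lowest
 */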
static int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */
static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	if (!base->event_count_active)
		return;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		event_queue_remove(base, ev, EVLIST_ACTIVE);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig)
				return;
		}
	}
}
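/*
 * Note on the scan above: only the first non-empty queue is drained per
 * pass, so queue 0 events always run before queue 1 events.  A steady
 * stream of priority-0 events can therefore starve priority-1 events
 * indefinitely; callers should reserve low numbers for rare, urgent work.
 */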
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}
int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}
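/*
 * Usage sketch (illustrative): dispatch blocks until no events remain
 * or the loop is asked to exit via event_loopexit().
 *
 *	if (event_dispatch() == -1)
 *		fprintf(stderr, "event_dispatch failed\n");
 */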
static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}
/* not thread safe */
int
event_loopexit(struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}
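/*
 * Usage sketch (illustrative): ask the loop to terminate after two
 * seconds by scheduling event_loopexit_cb as a one-shot timeout.
 *
 *	struct timeval tv = { 2, 0 };
 *
 *	event_loopexit(&tv);	// loop ends once the timeout fires
 */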
/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* Handle signals if our event base holds one */
	if (!TAILQ_EMPTY(&base->sig.signalqueue))
		evsignal_base = base;

	done = 0;
	while (!done) {
		/* Calculate the initial events that we are waiting for */
		if (evsel->recalc(base, evbase, 0) == -1)
			return (-1);

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
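/*
 * Usage sketch (illustrative): the flags modify one iteration of the
 * loop above.
 *
 *	event_loop(EVLOOP_ONCE);	// block, run one batch of events
 *	event_loop(EVLOOP_NONBLOCK);	// poll: run only already-ready events
 */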
/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */
static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}
/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}
/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
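/*
 * Usage sketch (illustrative; "my_timer_cb" is hypothetical): run a
 * callback exactly once, five seconds from now.  The internal
 * event_once wrapper frees itself after the callback fires.
 *
 *	struct timeval tv = { 5, 0 };
 *
 *	event_once(-1, EV_TIMEOUT, my_timer_cb, NULL, &tv);
 */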
void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	/* by default, we put new events into the middle priority */
	ev->ev_pri = current_base->nactivequeues/2;
}
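/*
 * Usage sketch (illustrative; "sock", "my_read_cb", "other_base" and
 * "ctx" are hypothetical): event_set() only initializes the struct;
 * nothing is registered until event_add().  EV_PERSIST keeps the event
 * registered after it fires.
 *
 *	struct event ev;
 *
 *	event_set(&ev, sock, EV_READ|EV_PERSIST, my_read_cb, &ctx);
 *	event_base_set(other_base, &ev);	// optional: move off current_base
 *	event_add(&ev, NULL);			// NULL = no timeout
 */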
int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}
/*
 * Sets the priority of an event - if the event is already active,
 * changing the priority will fail.
 */
int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
/*
 * Checks if a specific event is pending or scheduled.
 */
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;
	if (ev->ev_flags & EVLIST_SIGNAL)
		flags |= EV_SIGNAL;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(&now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}
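/*
 * Usage sketch (illustrative): check whether ev still has a pending
 * timeout and, if so, when it will fire in wall-clock terms.
 *
 *	struct timeval when;
 *
 *	if (event_pending(&ev, EV_TIMEOUT, &when))
 *		printf("fires at %ld\n", (long)when.tv_sec);
 */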
int
event_add(struct event *ev, struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	if (tv != NULL) {
		struct timeval now;

		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(&now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %d seconds, call %p",
			 (int)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		event_queue_insert(base, ev, EVLIST_INSERTED);

		return (evsel->add(evbase, ev));
	} else if ((ev->ev_events & EV_SIGNAL) &&
	    !(ev->ev_flags & EVLIST_SIGNAL)) {
		event_queue_insert(base, ev, EVLIST_SIGNAL);

		return (evsel->add(evbase, ev));
	}

	return (0);
}
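/*
 * Usage sketch (illustrative): re-adding an event with a timeout simply
 * reschedules it (the old EVLIST_TIMEOUT entry is removed above), which
 * is the idiom for a resettable idle timer.
 *
 *	struct timeval tv = { 30, 0 };
 *
 *	event_add(&ev, &tv);	// fires in 30s...
 *	event_add(&ev, &tv);	// ...now pushed back to 30s from here
 */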
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	} else if (ev->ev_flags & EVLIST_SIGNAL) {
		event_queue_remove(base, ev, EVLIST_SIGNAL);
		return (evsel->del(evbase, ev));
	}

	return (0);
}
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
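/*
 * Usage sketch (illustrative): manually mark an event active so its
 * callback runs on the next pass of event_process_active(), exactly as
 * if the backend had reported EV_READ on its descriptor.
 *
 *	event_active(&ev, EV_READ, 1);	// run the callback once with EV_READ
 */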
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(&now) == -1)
		return (-1);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
	return (0);
}
/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event *ev;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(tv);
	if (timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	RB_FOREACH(ev, event_tree, &base->timetree)
		timersub(&ev->ev_timeout, &off, &ev->ev_timeout);
}
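/*
 * Worked example (illustrative): suppose the wall clock is stepped back
 * 100 seconds while a timeout is pending 5 seconds from "now".  Without
 * correction the timeout would not fire for 105 seconds.  The loop above
 * computes off = old_now - new_now = 100s and subtracts it from every
 * pending ev_timeout, so relative deadlines are preserved.
 */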
static void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev, *next;

	gettime(&now);

	for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;
		next = RB_NEXT(event_tree, &base->timetree, ev);

		event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}
static void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_REMOVE(&base->sig.signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT:
		RB_REMOVE(event_tree, &base->timetree, ev);
		break;
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
static void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_INSERT_TAIL(&base->sig.signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT: {
		struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
		assert(tmp == NULL);
		break;
	}
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */
const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}