2 * Copyright (C) 2004-2008 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: task.c,v 1.105.128.2 2008/03/27 23:46:28 tbox Exp $ */
21 * \author Principal Author: Bob Halley
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
39 #include <isc/thread.h>
43 #ifndef ISC_PLATFORM_USETHREADS
45 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Debug tracing macros.  XTRACE(m) assumes a local variable named
 * 'task' is in scope; XTTRACE(t, m) takes the task explicitly;
 * XTHREADTRACE(m) logs only the calling thread.
 * NOTE(review): this listing is elided -- the empty XTHREADTRACE()
 * redefinition at original line 57 presumably lives in the #else
 * branch of a tracing conditional; confirm against the full source.
 */
48 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
49 task, isc_thread_self(), (m))
50 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
51 (t), isc_thread_self(), (m))
52 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
53 isc_thread_self(), (m))
57 #define XTHREADTRACE(m)
/*
 * Task states and their printable names; statenames[] is indexed by
 * the state value (used by isc_taskmgr_renderxml below).
 * NOTE(review): enum lines are elided here; a 'task_state_done'
 * member presumably completes the enum -- confirm in the full source.
 */
65 task_state_idle, task_state_ready, task_state_running,
70 static const char *statenames[] = {
71 "idle", "ready", "running", "done",
/* Magic-number validation for isc_task_t pointers. */
75 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
76 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
/*
 * Per-task structure (fragment; several members elided in this
 * listing).  Comments below preserve the original locking notes:
 * most members are protected by the task's own lock, the two links
 * by the manager's lock.
 */
81 isc_taskmgr_t * manager;
83 /* Locked by task lock. */
85 unsigned int references;
86 isc_eventlist_t events;
87 isc_eventlist_t on_shutdown;
93 /* Locked by task manager lock. */
94 LINK(isc_task_t) link;
95 LINK(isc_task_t) ready_link;
/* Flag bit set once shutdown of the task has been initiated. */
98 #define TASK_F_SHUTTINGDOWN 0x01
100 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
/* Magic-number validation for isc_taskmgr_t pointers. */
103 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
104 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
/*
 * Task manager structure (fragment; several members elided).  The
 * threaded build keeps a worker-thread array and two condition
 * variables; the non-threaded build keeps a global singleton
 * 'taskmgr' instead.
 */
111 #ifdef ISC_PLATFORM_USETHREADS
112 unsigned int workers;
113 isc_thread_t * threads;
114 #endif /* ISC_PLATFORM_USETHREADS */
115 /* Locked by task manager lock. */
116 unsigned int default_quantum;
117 LIST(isc_task_t) tasks;
118 isc_tasklist_t ready_tasks;
119 #ifdef ISC_PLATFORM_USETHREADS
120 isc_condition_t work_available;
121 isc_condition_t exclusive_granted;
122 #endif /* ISC_PLATFORM_USETHREADS */
123 unsigned int tasks_running;
124 isc_boolean_t exclusive_requested;
125 isc_boolean_t exiting;
126 #ifndef ISC_PLATFORM_USETHREADS
128 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * DEFAULT_TASKMGR_QUANTUM bounds events dispatched per call in the
 * non-threaded dispatch(); DEFAULT_DEFAULT_QUANTUM is the per-task
 * quantum used when the caller passes 0 to isc_taskmgr_create().
 */
131 #define DEFAULT_TASKMGR_QUANTUM 10
132 #define DEFAULT_DEFAULT_QUANTUM 5
/* True when the manager is exiting and no tasks remain. */
133 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
135 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded build: process-wide singleton manager. */
136 static isc_taskmgr_t *taskmgr = NULL;
137 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Final teardown of a fully-shut-down task: unlink it from the
 * manager's task list, wake idle workers if the manager has now
 * finished, then destroy the task's lock and free its memory.
 * Preconditions (enforced below): no queued events, no shutdown
 * events, zero references, state == done.
 */
144 task_finished(isc_task_t *task) {
145 isc_taskmgr_t *manager = task->manager;
147 REQUIRE(EMPTY(task->events));
148 REQUIRE(EMPTY(task->on_shutdown));
149 REQUIRE(task->references == 0);
150 REQUIRE(task->state == task_state_done);
152 XTRACE("task_finished");
154 LOCK(&manager->lock);
155 UNLINK(manager->tasks, task, link);
156 #ifdef ISC_PLATFORM_USETHREADS
157 if (FINISHED(manager)) {
159 * All tasks have completed and the
160 * task manager is exiting. Wake up
161 * any idle worker threads so they
164 BROADCAST(&manager->work_available);
166 #endif /* ISC_PLATFORM_USETHREADS */
167 UNLOCK(&manager->lock);
169 DESTROYLOCK(&task->lock);
171 isc_mem_put(manager->mctx, task, sizeof(*task));
/*
 * Create a new task owned by 'manager' with the given per-dispatch
 * 'quantum' (0 selects the manager's default) and return it via
 * '*taskp' with one reference held.
 * Returns ISC_R_SUCCESS, ISC_R_NOMEMORY on allocation failure, or
 * ISC_R_SHUTTINGDOWN if the manager is exiting (elided lines
 * presumably route the exiting case to the cleanup at lines 218-220;
 * confirm against the full source).
 */
175 isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
179 isc_boolean_t exiting;
182 REQUIRE(VALID_MANAGER(manager));
183 REQUIRE(taskp != NULL && *taskp == NULL);
185 task = isc_mem_get(manager->mctx, sizeof(*task));
187 return (ISC_R_NOMEMORY);
188 XTRACE("isc_task_create");
189 task->manager = manager;
190 result = isc_mutex_init(&task->lock);
191 if (result != ISC_R_SUCCESS) {
/* Mutex init failed: release the task memory before returning. */
192 isc_mem_put(manager->mctx, task, sizeof(*task));
195 task->state = task_state_idle;
196 task->references = 1;
197 INIT_LIST(task->events);
198 INIT_LIST(task->on_shutdown);
199 task->quantum = quantum;
202 memset(task->name, 0, sizeof(task->name));
204 INIT_LINK(task, link);
205 INIT_LINK(task, ready_link);
208 LOCK(&manager->lock);
209 if (!manager->exiting) {
210 if (task->quantum == 0)
211 task->quantum = manager->default_quantum;
212 APPEND(manager->tasks, task, link);
215 UNLOCK(&manager->lock);
/* Manager was exiting: undo the partial construction. */
218 DESTROYLOCK(&task->lock);
219 isc_mem_put(manager->mctx, task, sizeof(*task));
220 return (ISC_R_SHUTTINGDOWN);
223 task->magic = TASK_MAGIC;
226 return (ISC_R_SUCCESS);
/*
 * Attach *targetp to source: bump source's reference count under the
 * task lock.  (The LOCK and '*targetp = source' lines are elided in
 * this listing.)
 */
230 isc_task_attach(isc_task_t *source, isc_task_t **targetp) {
233 * Attach *targetp to source.
236 REQUIRE(VALID_TASK(source));
237 REQUIRE(targetp != NULL && *targetp == NULL);
239 XTTRACE(source, "isc_task_attach");
242 source->references++;
243 UNLOCK(&source->lock);
248 static inline isc_boolean_t
/*
 * Initiate shutdown of 'task': set TASK_F_SHUTTINGDOWN and move all
 * on_shutdown events onto the regular event queue in LIFO order.
 * Returns whether the task went from idle to ready (the return of
 * 'was_idle' is elided in this listing -- confirm), so the caller
 * knows to enqueue it on the manager's ready queue.
 * Caller must be holding the task's lock.
 */
249 task_shutdown(isc_task_t *task) {
250 isc_boolean_t was_idle = ISC_FALSE;
251 isc_event_t *event, *prev;
254 * Caller must be holding the task's lock.
257 XTRACE("task_shutdown");
259 if (! TASK_SHUTTINGDOWN(task)) {
260 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
261 ISC_MSG_SHUTTINGDOWN, "shutting down"));
262 task->flags |= TASK_F_SHUTTINGDOWN;
263 if (task->state == task_state_idle) {
264 INSIST(EMPTY(task->events));
265 task->state = task_state_ready;
268 INSIST(task->state == task_state_ready ||
269 task->state == task_state_running);
271 * Note that we post shutdown events LIFO.
273 for (event = TAIL(task->on_shutdown);
276 prev = PREV(event, ev_link);
277 DEQUEUE(task->on_shutdown, event, ev_link);
278 ENQUEUE(task->events, event, ev_link);
/*
 * Put 'task' (already in the ready state) on the manager's ready
 * queue and, in the threaded build, signal one idle worker.
 */
286 task_ready(isc_task_t *task) {
287 isc_taskmgr_t *manager = task->manager;
289 REQUIRE(VALID_MANAGER(manager));
290 REQUIRE(task->state == task_state_ready);
292 XTRACE("task_ready");
294 LOCK(&manager->lock);
296 ENQUEUE(manager->ready_tasks, task, ready_link);
297 #ifdef ISC_PLATFORM_USETHREADS
298 SIGNAL(&manager->work_available);
299 #endif /* ISC_PLATFORM_USETHREADS */
301 UNLOCK(&manager->lock);
304 static inline isc_boolean_t
/*
 * Drop one reference from 'task'.  If that was the last reference
 * and the task is idle with no pending events, mark it ready so the
 * run loop will take care of shutdown and termination.  Presumably
 * returns whether the task was made ready (return statement elided
 * in this listing -- confirm).  Caller must hold the task lock.
 */
305 task_detach(isc_task_t *task) {
308 * Caller must be holding the task lock.
311 REQUIRE(task->references > 0);
316 if (task->references == 0 && task->state == task_state_idle) {
317 INSIST(EMPTY(task->events));
319 * There are no references to this task, and no
320 * pending events. We could try to optimize and
321 * either initiate shutdown or clean up the task,
322 * depending on its state, but it's easier to just
323 * make the task ready and allow run() or the event
324 * loop to deal with shutting down and termination.
326 task->state = task_state_ready;
/*
 * Public wrapper: detach *taskp from its task and NULL it out
 * (the *taskp = NULL and task_ready()/task_finished() follow-up
 * lines are elided in this listing).
 */
334 isc_task_detach(isc_task_t **taskp) {
336 isc_boolean_t was_idle;
339 * Detach *taskp from its task.
342 REQUIRE(taskp != NULL);
344 REQUIRE(VALID_TASK(task));
346 XTRACE("isc_task_detach");
349 was_idle = task_detach(task);
358 static inline isc_boolean_t
/*
 * Queue '*eventp' on 'task'.  If the task was idle it becomes ready;
 * 'was_idle' is presumably returned so the caller can enqueue the
 * task on the manager's ready queue (return elided in this listing
 * -- confirm).  Caller must hold the task lock.
 */
359 task_send(isc_task_t *task, isc_event_t **eventp) {
360 isc_boolean_t was_idle = ISC_FALSE;
364 * Caller must be holding the task lock.
367 REQUIRE(eventp != NULL);
369 REQUIRE(event != NULL);
370 REQUIRE(event->ev_type > 0);
371 REQUIRE(task->state != task_state_done);
375 if (task->state == task_state_idle) {
377 INSIST(EMPTY(task->events));
378 task->state = task_state_ready;
380 INSIST(task->state == task_state_ready ||
381 task->state == task_state_running);
382 ENQUEUE(task->events, event, ev_link);
/*
 * Public send: deliver '*event' to 'task'.  Queues under the task
 * lock only, then (if the task was idle) adds it to the manager's
 * ready queue after the task lock is released -- the long comment
 * below explains why this two-step ordering avoids deadlock.
 */
389 isc_task_send(isc_task_t *task, isc_event_t **eventp) {
390 isc_boolean_t was_idle;
393 * Send '*event' to 'task'.
396 REQUIRE(VALID_TASK(task));
398 XTRACE("isc_task_send");
401 * We're trying hard to hold locks for as short a time as possible.
402 * We're also trying to hold as few locks as possible. This is why
403 * some processing is deferred until after the lock is released.
406 was_idle = task_send(task, eventp);
411 * We need to add this task to the ready queue.
413 * We've waited until now to do it because making a task
414 * ready requires locking the manager. If we tried to do
415 * this while holding the task lock, we could deadlock.
417 * We've changed the state to ready, so no one else will
418 * be trying to add this task to the ready queue. The
419 * only way to leave the ready state is by executing the
420 * task. It thus doesn't matter if events are added,
421 * removed, or a shutdown is started in the interval
422 * between the time we released the task lock, and the time
423 * we add the task to the ready queue.
/*
 * Send '*event' to '*taskp' and then detach '*taskp', performing
 * both operations under a single acquisition of the task lock.
 * At most one of the two can report the task as newly idle->ready
 * (INSIST below).
 */
430 isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
431 isc_boolean_t idle1, idle2;
435 * Send '*event' to '*taskp' and then detach '*taskp' from its
439 REQUIRE(taskp != NULL);
441 REQUIRE(VALID_TASK(task));
443 XTRACE("isc_task_sendanddetach");
446 idle1 = task_send(task, eventp);
447 idle2 = task_detach(task);
451 * If idle1, then idle2 shouldn't be true as well since we're holding
452 * the task lock, and thus the task cannot switch from ready back to
455 INSIST(!(idle1 && idle2));
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
463 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
/*
 * Shared worker for purge/unsend: move every event on 'task's queue
 * matching (sender, [first..last], tag) onto '*events', honoring
 * PURGE_OK when 'purging' is true.  Returns the count moved
 * (return/UNLOCK lines elided in this listing).
 */
466 dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
467 isc_eventtype_t last, void *tag,
468 isc_eventlist_t *events, isc_boolean_t purging)
470 isc_event_t *event, *next_event;
471 unsigned int count = 0;
473 REQUIRE(VALID_TASK(task));
474 REQUIRE(last >= first);
476 XTRACE("dequeue_events");
479 * Events matching 'sender', whose type is >= first and <= last, and
480 * whose tag is 'tag' will be dequeued. If 'purging', matching events
481 * which are marked as unpurgable will not be dequeued.
483 * sender == NULL means "any sender", and tag == NULL means "any tag".
488 for (event = HEAD(task->events); event != NULL; event = next_event) {
489 next_event = NEXT(event, ev_link);
490 if (event->ev_type >= first && event->ev_type <= last &&
491 (sender == NULL || event->ev_sender == sender) &&
492 (tag == NULL || event->ev_tag == tag) &&
493 (!purging || PURGE_OK(event))) {
494 DEQUEUE(task->events, event, ev_link);
495 ENQUEUE(*events, event, ev_link);
/*
 * Purge (dequeue and free) matching events from 'task's queue.
 * Built on dequeue_events() with purging=TRUE; frees each dequeued
 * event.  Purging never changes the task's state.
 */
506 isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
507 isc_eventtype_t last, void *tag)
510 isc_eventlist_t events;
511 isc_event_t *event, *next_event;
514 * Purge events from a task's event queue.
517 XTRACE("isc_task_purgerange");
519 ISC_LIST_INIT(events);
521 count = dequeue_events(task, sender, first, last, tag, &events,
524 for (event = HEAD(events); event != NULL; event = next_event) {
525 next_event = NEXT(event, ev_link);
526 isc_event_free(&event);
530 * Note that purging never changes the state of the task.
/* Single-type convenience wrapper around isc_task_purgerange(). */
537 isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
541 * Purge events from a task's event queue.
544 XTRACE("isc_task_purge");
546 return (isc_task_purgerange(task, sender, type, type, tag));
/*
 * Purge one specific 'event' from 'task's queue, if present and
 * purgeable.  'event' need not be a valid pointer: the pointer is
 * only compared against queued events, never dereferenced unless a
 * match is found.
 */
550 isc_task_purgeevent(isc_task_t *task, isc_event_t *event) {
551 isc_event_t *curr_event, *next_event;
554 * Purge 'event' from a task's event queue.
556 * XXXRTH: WARNING: This method may be removed before beta.
559 REQUIRE(VALID_TASK(task));
562 * If 'event' is on the task's event queue, it will be purged,
563 * unless it is marked as unpurgeable. 'event' does not have to be
564 * on the task's event queue; in fact, it can even be an invalid
565 * pointer. Purging only occurs if the event is actually on the task's
568 * Purging never changes the state of the task.
572 for (curr_event = HEAD(task->events);
574 curr_event = next_event) {
575 next_event = NEXT(curr_event, ev_link);
576 if (curr_event == event && PURGE_OK(event)) {
577 DEQUEUE(task->events, curr_event, ev_link);
/* Not found (or unpurgeable): elided lines presumably return FALSE. */
583 if (curr_event == NULL)
586 isc_event_free(&curr_event);
/*
 * Remove matching events from 'task's queue into '*events' WITHOUT
 * freeing them (dequeue_events with purging=FALSE; the final
 * argument line is elided in this listing).
 */
592 isc_task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
593 isc_eventtype_t last, void *tag,
594 isc_eventlist_t *events)
597 * Remove events from a task's event queue.
600 XTRACE("isc_task_unsendrange");
602 return (dequeue_events(task, sender, first, last, tag, events,
/* Single-type convenience form of isc_task_unsendrange(). */
607 isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
608 void *tag, isc_eventlist_t *events)
611 * Remove events from a task's event queue.
614 XTRACE("isc_task_unsend");
616 return (dequeue_events(task, sender, type, type, tag, events,
/*
 * Register a shutdown callback: allocate an ISC_TASKEVENT_SHUTDOWN
 * event with 'action'/'arg' and queue it on task->on_shutdown.  If
 * the task is already shutting down, the event is freed instead and
 * ISC_R_SHUTTINGDOWN is returned; ISC_R_NOMEMORY on allocation
 * failure, ISC_R_SUCCESS otherwise.
 */
621 isc_task_onshutdown(isc_task_t *task, isc_taskaction_t action, const void *arg)
623 isc_boolean_t disallowed = ISC_FALSE;
624 isc_result_t result = ISC_R_SUCCESS;
628 * Send a shutdown event with action 'action' and argument 'arg' when
629 * 'task' is shutdown.
632 REQUIRE(VALID_TASK(task));
633 REQUIRE(action != NULL);
635 event = isc_event_allocate(task->manager->mctx,
637 ISC_TASKEVENT_SHUTDOWN,
642 return (ISC_R_NOMEMORY);
645 if (TASK_SHUTTINGDOWN(task)) {
646 disallowed = ISC_TRUE;
647 result = ISC_R_SHUTTINGDOWN;
649 ENQUEUE(task->on_shutdown, event, ev_link);
/* Disallowed case: free the unqueued event. */
653 isc_mem_put(task->manager->mctx, event, sizeof(*event));
/*
 * Public shutdown: initiate shutdown of 'task' via task_shutdown()
 * (the LOCK/UNLOCK and task_ready() follow-up are elided in this
 * listing).
 */
659 isc_task_shutdown(isc_task_t *task) {
660 isc_boolean_t was_idle;
666 REQUIRE(VALID_TASK(task));
669 was_idle = task_shutdown(task);
/* Destroy = shutdown followed by detach of the caller's reference. */
677 isc_task_destroy(isc_task_t **taskp) {
683 REQUIRE(taskp != NULL);
685 isc_task_shutdown(*taskp);
686 isc_task_detach(taskp);
/*
 * Set the task's debugging name (and presumably its tag; the tag
 * assignment is elided in this listing).  The memset-then-strncpy
 * pair guarantees NUL termination despite strncpy's semantics.
 */
690 isc_task_setname(isc_task_t *task, const char *name, void *tag) {
696 REQUIRE(VALID_TASK(task));
699 memset(task->name, 0, sizeof(task->name));
700 strncpy(task->name, name, sizeof(task->name) - 1);
/*
 * Trivial accessors (bodies elided in this listing): name, tag, and
 * the cached time of the task's current/last dispatch.
 */
706 isc_task_getname(isc_task_t *task) {
711 isc_task_gettag(isc_task_t *task) {
716 isc_task_getcurrenttime(isc_task_t *task, isc_stdtime_t *t) {
717 REQUIRE(VALID_TASK(task));
/*
 * Core event loop shared by worker threads (threaded build) and
 * isc__taskmgr_dispatch() (non-threaded build).  Repeatedly dequeues
 * a ready task, runs up to task->quantum of its events, then either
 * finishes the task, idles it, or requeues it.  In the non-threaded
 * build a local ready_tasks list defers requeues and a global
 * DEFAULT_TASKMGR_QUANTUM bounds total events per call.
 * NOTE(review): many lines are elided in this listing (loop bodies,
 * braces, 'break'/'goto' targets); read alongside the full source.
 */
731 dispatch(isc_taskmgr_t *manager) {
733 #ifndef ISC_PLATFORM_USETHREADS
734 unsigned int total_dispatch_count = 0;
735 isc_tasklist_t ready_tasks;
736 #endif /* ISC_PLATFORM_USETHREADS */
738 REQUIRE(VALID_MANAGER(manager));
741 * Again we're trying to hold the lock for as short a time as possible
742 * and to do as little locking and unlocking as possible.
744 * In both while loops, the appropriate lock must be held before the
745 * while body starts. Code which acquired the lock at the top of
746 * the loop would be more readable, but would result in a lot of
747 * extra locking. Compare:
754 * while (expression) {
759 * Unlocked part here...
766 * Note how if the loop continues we unlock and then immediately lock.
767 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
768 * unlocks. Also note that the lock is not held when the while
769 * condition is tested, which may or may not be important, depending
775 * while (expression) {
779 * Unlocked part here...
786 * For N iterations of the loop, this code does N+1 locks and N+1
787 * unlocks. The while expression is always protected by the lock.
790 #ifndef ISC_PLATFORM_USETHREADS
791 ISC_LIST_INIT(ready_tasks);
793 LOCK(&manager->lock);
794 while (!FINISHED(manager)) {
795 #ifdef ISC_PLATFORM_USETHREADS
797 * For reasons similar to those given in the comment in
798 * isc_task_send() above, it is safe for us to dequeue
799 * the task while only holding the manager lock, and then
800 * change the task to running state while only holding the
803 while ((EMPTY(manager->ready_tasks) ||
804 manager->exclusive_requested) &&
807 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
809 ISC_MSG_WAIT, "wait"));
810 WAIT(&manager->work_available, &manager->lock);
811 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
813 ISC_MSG_AWAKE, "awake"));
815 #else /* ISC_PLATFORM_USETHREADS */
816 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
817 EMPTY(manager->ready_tasks))
819 #endif /* ISC_PLATFORM_USETHREADS */
820 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
821 ISC_MSG_WORKING, "working"));
823 task = HEAD(manager->ready_tasks);
825 unsigned int dispatch_count = 0;
826 isc_boolean_t done = ISC_FALSE;
827 isc_boolean_t requeue = ISC_FALSE;
828 isc_boolean_t finished = ISC_FALSE;
831 INSIST(VALID_TASK(task));
834 * Note we only unlock the manager lock if we actually
835 * have a task to do. We must reacquire the manager
836 * lock before exiting the 'if (task != NULL)' block.
838 DEQUEUE(manager->ready_tasks, task, ready_link);
839 manager->tasks_running++;
840 UNLOCK(&manager->lock);
843 INSIST(task->state == task_state_ready);
844 task->state = task_state_running;
845 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
846 ISC_MSG_RUNNING, "running"));
847 isc_stdtime_get(&task->now);
849 if (!EMPTY(task->events)) {
850 event = HEAD(task->events);
851 DEQUEUE(task->events, event, ev_link);
854 * Execute the event action.
856 XTRACE(isc_msgcat_get(isc_msgcat,
860 if (event->ev_action != NULL) {
/* Action runs with the task lock released (elided UNLOCK/LOCK). */
862 (event->ev_action)(task,event);
866 #ifndef ISC_PLATFORM_USETHREADS
867 total_dispatch_count++;
868 #endif /* ISC_PLATFORM_USETHREADS */
871 if (task->references == 0 &&
872 EMPTY(task->events) &&
873 !TASK_SHUTTINGDOWN(task)) {
874 isc_boolean_t was_idle;
877 * There are no references and no
878 * pending events for this task,
879 * which means it will not become
880 * runnable again via an external
881 * action (such as sending an event
884 * We initiate shutdown to prevent
885 * it from becoming a zombie.
887 * We do this here instead of in
888 * the "if EMPTY(task->events)" block
891 * If we post no shutdown events,
892 * we want the task to finish.
894 * If we did post shutdown events,
895 * will still want the task's
896 * quantum to be applied.
898 was_idle = task_shutdown(task);
902 if (EMPTY(task->events)) {
904 * Nothing else to do for this task
907 XTRACE(isc_msgcat_get(isc_msgcat,
911 if (task->references == 0 &&
912 TASK_SHUTTINGDOWN(task)) {
916 XTRACE(isc_msgcat_get(
/* Unreferenced and shut down: the task is done... */
922 task->state = task_state_done;
/* ...otherwise it simply goes idle. */
924 task->state = task_state_idle;
926 } else if (dispatch_count >= task->quantum) {
928 * Our quantum has expired, but
929 * there is more work to be done.
930 * We'll requeue it to the ready
933 * We don't check quantum until
934 * dispatching at least one event,
935 * so the minimum quantum is one.
937 XTRACE(isc_msgcat_get(isc_msgcat,
941 task->state = task_state_ready;
951 LOCK(&manager->lock);
952 manager->tasks_running--;
953 #ifdef ISC_PLATFORM_USETHREADS
954 if (manager->exclusive_requested &&
955 manager->tasks_running == 1) {
/* Last non-exclusive runner: let the exclusive waiter proceed. */
956 SIGNAL(&manager->exclusive_granted);
958 #endif /* ISC_PLATFORM_USETHREADS */
961 * We know we're awake, so we don't have
962 * to wakeup any sleeping threads if the
963 * ready queue is empty before we requeue.
965 * A possible optimization if the queue is
966 * empty is to 'goto' the 'if (task != NULL)'
967 * block, avoiding the ENQUEUE of the task
968 * and the subsequent immediate DEQUEUE
969 * (since it is the only executable task).
970 * We don't do this because then we'd be
971 * skipping the exit_requested check. The
972 * cost of ENQUEUE is low anyway, especially
973 * when you consider that we'd have to do
974 * an extra EMPTY check to see if we could
975 * do the optimization. If the ready queue
976 * were usually nonempty, the 'optimization'
977 * might even hurt rather than help.
979 #ifdef ISC_PLATFORM_USETHREADS
980 ENQUEUE(manager->ready_tasks, task,
983 ENQUEUE(ready_tasks, task, ready_link);
988 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded: merge locally-deferred requeues back in one shot. */
989 ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
991 UNLOCK(&manager->lock);
994 #ifdef ISC_PLATFORM_USETHREADS
995 static isc_threadresult_t
/*
 * Worker thread entry point: runs dispatch() on the manager passed
 * as the thread argument until the manager finishes.
 */
1000 isc_taskmgr_t *manager = uap;
1002 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1003 ISC_MSG_STARTING, "starting"));
1007 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1008 ISC_MSG_EXITING, "exiting"));
1010 return ((isc_threadresult_t)0);
1012 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Release all manager resources: condition variables and thread
 * array (threaded build), the lock, the manager structure itself,
 * and the manager's reference on its memory context.  mctx is saved
 * first because isc_mem_put() frees the struct holding the pointer.
 */
1015 manager_free(isc_taskmgr_t *manager) {
1018 #ifdef ISC_PLATFORM_USETHREADS
1019 (void)isc_condition_destroy(&manager->exclusive_granted);
1020 (void)isc_condition_destroy(&manager->work_available);
1021 isc_mem_free(manager->mctx, manager->threads);
1022 #endif /* ISC_PLATFORM_USETHREADS */
1023 DESTROYLOCK(&manager->lock);
1025 mctx = manager->mctx;
1026 isc_mem_put(mctx, manager, sizeof(*manager));
1027 isc_mem_detach(&mctx);
/*
 * Create a task manager with 'workers' worker threads (threaded
 * build; the non-threaded build reuses a reference-counted global
 * singleton) and per-task default quantum 'default_quantum' (0
 * selects DEFAULT_DEFAULT_QUANTUM).  Returns ISC_R_SUCCESS,
 * ISC_R_NOMEMORY, ISC_R_UNEXPECTED (condition init failure), or
 * ISC_R_NOTHREADS if no worker thread could be started.  Error
 * paths unwind via the cleanup_* labels near the end.
 */
1031 isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1032 unsigned int default_quantum, isc_taskmgr_t **managerp)
1034 isc_result_t result;
1035 unsigned int i, started = 0;
1036 isc_taskmgr_t *manager;
1039 * Create a new task manager.
1042 REQUIRE(workers > 0);
1043 REQUIRE(managerp != NULL && *managerp == NULL);
1045 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded: hand back the existing singleton if there is one. */
1050 if (taskmgr != NULL) {
1052 *managerp = taskmgr;
1053 return (ISC_R_SUCCESS);
1055 #endif /* ISC_PLATFORM_USETHREADS */
1057 manager = isc_mem_get(mctx, sizeof(*manager));
1058 if (manager == NULL)
1059 return (ISC_R_NOMEMORY);
1060 manager->magic = TASK_MANAGER_MAGIC;
1061 manager->mctx = NULL;
1062 result = isc_mutex_init(&manager->lock);
1063 if (result != ISC_R_SUCCESS)
1066 #ifdef ISC_PLATFORM_USETHREADS
1067 manager->workers = 0;
1068 manager->threads = isc_mem_allocate(mctx,
1069 workers * sizeof(isc_thread_t));
1070 if (manager->threads == NULL) {
1071 result = ISC_R_NOMEMORY;
1074 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1075 UNEXPECTED_ERROR(__FILE__, __LINE__,
1076 "isc_condition_init() %s",
1077 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1078 ISC_MSG_FAILED, "failed"));
1079 result = ISC_R_UNEXPECTED;
1080 goto cleanup_threads;
1082 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1083 UNEXPECTED_ERROR(__FILE__, __LINE__,
1084 "isc_condition_init() %s",
1085 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1086 ISC_MSG_FAILED, "failed"));
1087 result = ISC_R_UNEXPECTED;
1088 goto cleanup_workavailable;
1090 #endif /* ISC_PLATFORM_USETHREADS */
1091 if (default_quantum == 0)
1092 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1093 manager->default_quantum = default_quantum;
1094 INIT_LIST(manager->tasks);
1095 INIT_LIST(manager->ready_tasks);
1096 manager->tasks_running = 0;
1097 manager->exclusive_requested = ISC_FALSE;
1098 manager->exiting = ISC_FALSE;
1100 isc_mem_attach(mctx, &manager->mctx);
1102 #ifdef ISC_PLATFORM_USETHREADS
1103 LOCK(&manager->lock);
/* Start the worker threads; count successes in manager->workers. */
1107 for (i = 0; i < workers; i++) {
1108 if (isc_thread_create(run, manager,
1109 &manager->threads[manager->workers]) ==
1115 UNLOCK(&manager->lock);
/* No thread started (elided check): tear down and fail. */
1118 manager_free(manager);
1119 return (ISC_R_NOTHREADS);
1121 isc_thread_setconcurrency(workers);
1122 #else /* ISC_PLATFORM_USETHREADS */
1125 #endif /* ISC_PLATFORM_USETHREADS */
1127 *managerp = manager;
1129 return (ISC_R_SUCCESS);
1131 #ifdef ISC_PLATFORM_USETHREADS
1132 cleanup_workavailable:
1133 (void)isc_condition_destroy(&manager->work_available);
1135 isc_mem_free(mctx, manager->threads);
1137 DESTROYLOCK(&manager->lock);
1140 isc_mem_put(mctx, manager, sizeof(*manager));
/*
 * Destroy '*managerp': mark the manager exiting, post shutdown
 * events to every task, then (threaded build) wake and join all
 * workers, or (non-threaded build) drain the ready queue inline.
 * Must be called exactly once, by a non-worker thread.  This is the
 * only function that holds a task lock and the manager lock at the
 * same time.
 */
1145 isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
1146 isc_taskmgr_t *manager;
1151 * Destroy '*managerp'.
1154 REQUIRE(managerp != NULL);
1155 manager = *managerp;
1156 REQUIRE(VALID_MANAGER(manager));
1158 #ifndef ISC_PLATFORM_USETHREADS
/* Singleton still shared: just drop one reference (elided lines). */
1161 if (manager->refs > 1) {
1166 #endif /* ISC_PLATFORM_USETHREADS */
1168 XTHREADTRACE("isc_taskmgr_destroy");
1170 * Only one non-worker thread may ever call this routine.
1171 * If a worker thread wants to initiate shutdown of the
1172 * task manager, it should ask some non-worker thread to call
1173 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1174 * that the startup thread is sleeping on.
1178 * Unlike elsewhere, we're going to hold this lock a long time.
1179 * We need to do so, because otherwise the list of tasks could
1180 * change while we were traversing it.
1182 * This is also the only function where we will hold both the
1183 * task manager lock and a task lock at the same time.
1186 LOCK(&manager->lock);
1189 * Make sure we only get called once.
1191 INSIST(!manager->exiting);
1192 manager->exiting = ISC_TRUE;
1195 * Post shutdown event(s) to every task (if they haven't already been
1198 for (task = HEAD(manager->tasks);
1200 task = NEXT(task, link)) {
1202 if (task_shutdown(task))
1203 ENQUEUE(manager->ready_tasks, task, ready_link);
1204 UNLOCK(&task->lock);
1206 #ifdef ISC_PLATFORM_USETHREADS
1208 * Wake up any sleeping workers. This ensures we get work done if
1209 * there's work left to do, and if there are already no tasks left
1210 * it will cause the workers to see manager->exiting.
1212 BROADCAST(&manager->work_available);
1213 UNLOCK(&manager->lock);
1216 * Wait for all the worker threads to exit.
1218 for (i = 0; i < manager->workers; i++)
1219 (void)isc_thread_join(manager->threads[i], NULL);
1220 #else /* ISC_PLATFORM_USETHREADS */
1222 * Dispatch the shutdown events.
1224 UNLOCK(&manager->lock);
1225 while (isc__taskmgr_ready())
1226 (void)isc__taskmgr_dispatch();
/* Diagnostic aid: dump live allocations if tasks failed to finish. */
1227 if (!ISC_LIST_EMPTY(manager->tasks))
1228 isc_mem_printallactive(stderr);
1229 INSIST(ISC_LIST_EMPTY(manager->tasks));
1230 #endif /* ISC_PLATFORM_USETHREADS */
1232 manager_free(manager);
1237 #ifndef ISC_PLATFORM_USETHREADS
/*
 * Non-threaded build helpers: isc__taskmgr_ready() reports whether
 * the singleton manager has runnable tasks; isc__taskmgr_dispatch()
 * runs one dispatch() pass (the dispatch(manager) call itself is
 * elided in this listing).
 */
1239 isc__taskmgr_ready(void) {
1240 if (taskmgr == NULL)
1242 return (ISC_TF(!ISC_LIST_EMPTY(taskmgr->ready_tasks)));
1246 isc__taskmgr_dispatch(void) {
1247 isc_taskmgr_t *manager = taskmgr;
1249 if (taskmgr == NULL)
1250 return (ISC_R_NOTFOUND);
1254 return (ISC_R_SUCCESS);
1257 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Request exclusive execution for the calling task: sets
 * exclusive_requested (failing with ISC_R_LOCKBUSY if another task
 * already holds it) and waits until this task is the only one
 * running.  A no-op returning ISC_R_SUCCESS in the non-threaded
 * build.  Pair with isc_task_endexclusive().
 */
1260 isc_task_beginexclusive(isc_task_t *task) {
1261 #ifdef ISC_PLATFORM_USETHREADS
1262 isc_taskmgr_t *manager = task->manager;
1263 REQUIRE(task->state == task_state_running);
1264 LOCK(&manager->lock);
1265 if (manager->exclusive_requested) {
1266 UNLOCK(&manager->lock);
1267 return (ISC_R_LOCKBUSY);
1269 manager->exclusive_requested = ISC_TRUE;
1270 while (manager->tasks_running > 1) {
1271 WAIT(&manager->exclusive_granted, &manager->lock);
1273 UNLOCK(&manager->lock);
1277 return (ISC_R_SUCCESS);
/*
 * Release exclusive mode previously acquired with
 * isc_task_beginexclusive() and wake all workers blocked on
 * work_available.  No-op in the non-threaded build.
 */
1281 isc_task_endexclusive(isc_task_t *task) {
1282 #ifdef ISC_PLATFORM_USETHREADS
1283 isc_taskmgr_t *manager = task->manager;
1284 REQUIRE(task->state == task_state_running);
1285 LOCK(&manager->lock);
1286 REQUIRE(manager->exclusive_requested);
1287 manager->exclusive_requested = ISC_FALSE;
1288 BROADCAST(&manager->work_available);
1289 UNLOCK(&manager->lock);
/*
 * Render the manager's state as XML (libxml2 xmlTextWriter):
 * thread-model details, then one <task> element per task with name,
 * references, id (pointer), state name, and quantum.  Each task's
 * lock is held while its element is written; presumably the manager
 * lock is held around the list walk (LOCK lines elided -- confirm).
 */
1298 isc_taskmgr_renderxml(isc_taskmgr_t *mgr, xmlTextWriterPtr writer)
1305 * Write out the thread-model, and some details about each depending
1306 * on which type is enabled.
1308 xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
1309 #ifdef ISC_PLATFORM_USETHREADS
1310 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1311 xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
1312 xmlTextWriterEndElement(writer); /* type */
1314 xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
1315 xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
1316 xmlTextWriterEndElement(writer); /* worker-threads */
1317 #else /* ISC_PLATFORM_USETHREADS */
1318 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1319 xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
1320 xmlTextWriterEndElement(writer); /* type */
1322 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1323 xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
1324 xmlTextWriterEndElement(writer); /* references */
1325 #endif /* ISC_PLATFORM_USETHREADS */
1327 xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
1328 xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
1329 xmlTextWriterEndElement(writer); /* default-quantum */
1331 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
1332 xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
1333 xmlTextWriterEndElement(writer); /* tasks-running */
1335 xmlTextWriterEndElement(writer); /* thread-model */
1337 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
1338 task = ISC_LIST_HEAD(mgr->tasks);
1339 while (task != NULL) {
1341 xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");
1343 if (task->name[0] != 0) {
1344 xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
1345 xmlTextWriterWriteFormatString(writer, "%s",
1347 xmlTextWriterEndElement(writer); /* name */
1350 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1351 xmlTextWriterWriteFormatString(writer, "%d", task->references);
1352 xmlTextWriterEndElement(writer); /* references */
1354 xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
1355 xmlTextWriterWriteFormatString(writer, "%p", task);
1356 xmlTextWriterEndElement(writer); /* id */
1358 xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
1359 xmlTextWriterWriteFormatString(writer, "%s",
1360 statenames[task->state]);
1361 xmlTextWriterEndElement(writer); /* state */
1363 xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
1364 xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
1365 xmlTextWriterEndElement(writer); /* quantum */
1367 xmlTextWriterEndElement(writer);
1369 UNLOCK(&task->lock);
1370 task = ISC_LIST_NEXT(task, link);
1372 xmlTextWriterEndElement(writer); /* tasks */
1376 #endif /* HAVE_LIBXML2 */