2 * Copyright (C) 2004 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: task.c,v 1.85.2.3.8.5 2004/10/15 00:45:45 marka Exp $ */
21 * Principal Author: Bob Halley
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
39 #include <isc/thread.h>
42 #ifndef ISC_PLATFORM_USETHREADS
44 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * NOTE(review): this listing has original line numbers embedded in each
 * line and they are non-contiguous, so several declarations (the enum
 * header, the struct headers, the task lock/state/flags/quantum/name
 * fields, and some #if guards) appear to have been elided.  Code is left
 * byte-identical; only comments were added.
 */
/* Enable per-task name storage (see memset of task->name below). */
46 #define ISC_TASK_NAMES 1
/*
 * Debug tracing macros.  XTRACE expands a local variable named 'task';
 * XTTRACE takes the task explicitly; XTHREADTRACE logs thread-only.
 * The empty XTHREADTRACE below is the non-tracing variant (its
 * surrounding #if/#else lines are not visible in this listing).
 */
49 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
50 task, isc_thread_self(), (m))
51 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
52 (t), isc_thread_self(), (m))
53 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
54 isc_thread_self(), (m))
58 #define XTHREADTRACE(m)
/* Task state enumerators (enum header/terminator elided in listing). */
66 task_state_idle, task_state_ready, task_state_running,
/* Magic-number validation for task objects. */
70 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
71 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
/* Task structure fields (struct header elided in listing). */
76 isc_taskmgr_t * manager;
78 /* Locked by task lock. */
80 unsigned int references;
81 isc_eventlist_t events;
82 isc_eventlist_t on_shutdown;
90 /* Locked by task manager lock. */
91 LINK(isc_task_t) link;
92 LINK(isc_task_t) ready_link;
/* Flag bit(s) kept in the task's flags word. */
95 #define TASK_F_SHUTTINGDOWN 0x01
97 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
/* Magic-number validation for task-manager objects. */
100 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
101 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
/* Task-manager structure fields (struct header elided in listing). */
108 #ifdef ISC_PLATFORM_USETHREADS
109 unsigned int workers;
110 isc_thread_t * threads;
111 #endif /* ISC_PLATFORM_USETHREADS */
112 /* Locked by task manager lock. */
113 unsigned int default_quantum;
114 LIST(isc_task_t) tasks;
115 isc_tasklist_t ready_tasks;
116 #ifdef ISC_PLATFORM_USETHREADS
117 isc_condition_t work_available;
118 isc_condition_t exclusive_granted;
119 #endif /* ISC_PLATFORM_USETHREADS */
120 unsigned int tasks_running;
121 isc_boolean_t exclusive_requested;
122 isc_boolean_t exiting;
123 #ifndef ISC_PLATFORM_USETHREADS
125 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Dispatch quanta: DEFAULT_TASKMGR_QUANTUM bounds events per
 * isc__taskmgr_dispatch() call in threadless mode;
 * DEFAULT_DEFAULT_QUANTUM is the fallback per-task quantum used when
 * isc_task_create() is passed quantum == 0.
 */
128 #define DEFAULT_TASKMGR_QUANTUM 10
129 #define DEFAULT_DEFAULT_QUANTUM 5
/* True once the manager is exiting and its task list has drained. */
130 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
/* Threadless builds share a single global task-manager singleton. */
132 #ifndef ISC_PLATFORM_USETHREADS
133 static isc_taskmgr_t *taskmgr = NULL;
134 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Final teardown of a task that has reached task_state_done: unlink it
 * from the manager's task list, wake idle worker threads when the whole
 * manager is finished (so they can observe FINISHED() and exit), then
 * destroy the task's lock and return its memory.
 * Preconditions (REQUIREd below): no queued events, no pending
 * on_shutdown events, zero references, state == task_state_done.
 * NOTE(review): some lines of this function are elided in this listing.
 */
141 task_finished(isc_task_t *task) {
142 isc_taskmgr_t *manager = task->manager;
144 REQUIRE(EMPTY(task->events));
145 REQUIRE(EMPTY(task->on_shutdown));
146 REQUIRE(task->references == 0);
147 REQUIRE(task->state == task_state_done);
149 XTRACE("task_finished");
151 LOCK(&manager->lock);
152 UNLINK(manager->tasks, task, link);
153 #ifdef ISC_PLATFORM_USETHREADS
154 if (FINISHED(manager)) {
156 * All tasks have completed and the
157 * task manager is exiting. Wake up
158 * any idle worker threads so they
161 BROADCAST(&manager->work_available);
163 #endif /* ISC_PLATFORM_USETHREADS */
164 UNLOCK(&manager->lock);
/* Task lock must be destroyed before the task memory is released. */
166 DESTROYLOCK(&task->lock);
168 isc_mem_put(manager->mctx, task, sizeof(*task));
/*
 * Create a task attached to 'manager' with the given per-run event
 * 'quantum' (0 means "use the manager's default_quantum").  On success
 * the new task starts idle with one reference and is appended to the
 * manager's task list.  Fails with ISC_R_NOMEMORY, ISC_R_UNEXPECTED
 * (mutex init failure), or ISC_R_SHUTTINGDOWN (manager exiting).
 * NOTE(review): some lines (e.g. the taskp assignment and several
 * closing braces) are elided in this listing.
 */
172 isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
176 isc_boolean_t exiting;
178 REQUIRE(VALID_MANAGER(manager));
179 REQUIRE(taskp != NULL && *taskp == NULL);
181 task = isc_mem_get(manager->mctx, sizeof(*task));
183 return (ISC_R_NOMEMORY);
184 XTRACE("isc_task_create");
185 task->manager = manager;
186 if (isc_mutex_init(&task->lock) != ISC_R_SUCCESS) {
/* Undo the allocation before reporting the mutex failure. */
187 isc_mem_put(manager->mctx, task, sizeof(*task));
188 UNEXPECTED_ERROR(__FILE__, __LINE__,
189 "isc_mutex_init() %s",
190 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
191 ISC_MSG_FAILED, "failed"));
192 return (ISC_R_UNEXPECTED);
/* New tasks begin idle, holding the caller's single reference. */
194 task->state = task_state_idle;
195 task->references = 1;
196 INIT_LIST(task->events);
197 INIT_LIST(task->on_shutdown);
198 task->quantum = quantum;
201 #ifdef ISC_TASK_NAMES
202 memset(task->name, 0, sizeof(task->name));
205 INIT_LINK(task, link);
206 INIT_LINK(task, ready_link);
/*
 * Register with the manager unless it is already exiting; quantum 0
 * is resolved to the manager default here, under the manager lock.
 */
209 LOCK(&manager->lock);
210 if (!manager->exiting) {
211 if (task->quantum == 0)
212 task->quantum = manager->default_quantum;
213 APPEND(manager->tasks, task, link);
216 UNLOCK(&manager->lock);
/* Manager was exiting: dismantle the half-built task. */
219 DESTROYLOCK(&task->lock);
220 isc_mem_put(manager->mctx, task, sizeof(*task));
221 return (ISC_R_SHUTTINGDOWN);
/* Stamp the magic last so VALID_TASK() only passes on full init. */
224 task->magic = TASK_MAGIC;
227 return (ISC_R_SUCCESS);
/*
 * Attach *targetp to 'source': bump the reference count under the
 * task lock.  NOTE(review): the LOCK() call and the *targetp = source
 * assignment are elided from this listing.
 */
231 isc_task_attach(isc_task_t *source, isc_task_t **targetp) {
234 * Attach *targetp to source.
237 REQUIRE(VALID_TASK(source));
238 REQUIRE(targetp != NULL && *targetp == NULL);
240 XTTRACE(source, "isc_task_attach");
243 source->references++;
244 UNLOCK(&source->lock);
/*
 * Start shutting 'task' down (idempotent via TASK_F_SHUTTINGDOWN):
 * set the shutting-down flag, move an idle task to ready, and move all
 * on_shutdown events onto the regular event queue in LIFO order.
 * Caller must hold the task's lock.  Returns whether the task was idle
 * and therefore now needs task_ready() — presumably via 'was_idle';
 * the returning line is elided from this listing.
 */
249 static inline isc_boolean_t
250 task_shutdown(isc_task_t *task) {
251 isc_boolean_t was_idle = ISC_FALSE;
252 isc_event_t *event, *prev;
255 * Caller must be holding the task's lock.
258 XTRACE("task_shutdown");
260 if (! TASK_SHUTTINGDOWN(task)) {
261 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
262 ISC_MSG_SHUTTINGDOWN, "shutting down"));
263 task->flags |= TASK_F_SHUTTINGDOWN;
264 if (task->state == task_state_idle) {
265 INSIST(EMPTY(task->events));
266 task->state = task_state_ready;
269 INSIST(task->state == task_state_ready ||
270 task->state == task_state_running);
272 * Note that we post shutdown events LIFO.
/* Walk on_shutdown tail-to-head so events run newest-first. */
274 for (event = TAIL(task->on_shutdown);
277 prev = PREV(event, ev_link);
278 DEQUEUE(task->on_shutdown, event, ev_link);
279 ENQUEUE(task->events, event, ev_link);
/*
 * Put a ready task on the manager's ready queue and, in threaded
 * builds, signal one waiting worker.  Caller must NOT hold the task
 * lock (the manager lock is taken here; holding both in the wrong
 * order could deadlock — see the comment in isc_task_send()).
 */
287 task_ready(isc_task_t *task) {
288 isc_taskmgr_t *manager = task->manager;
290 REQUIRE(VALID_MANAGER(manager));
291 REQUIRE(task->state == task_state_ready);
293 XTRACE("task_ready");
295 LOCK(&manager->lock);
297 ENQUEUE(manager->ready_tasks, task, ready_link);
298 #ifdef ISC_PLATFORM_USETHREADS
299 SIGNAL(&manager->work_available);
300 #endif /* ISC_PLATFORM_USETHREADS */
302 UNLOCK(&manager->lock);
/*
 * Drop one reference from 'task' (caller holds the task lock).  If the
 * count hits zero while the task is idle with no pending events, mark
 * it ready so the run loop can shut it down and finish it.  Returns
 * presumably whether the task transitioned from idle (the decrement
 * and return lines are elided from this listing).
 */
305 static inline isc_boolean_t
306 task_detach(isc_task_t *task) {
309 * Caller must be holding the task lock.
312 REQUIRE(task->references > 0);
317 if (task->references == 0 && task->state == task_state_idle) {
318 INSIST(EMPTY(task->events));
320 * There are no references to this task, and no
321 * pending events. We could try to optimize and
322 * either initiate shutdown or clean up the task,
323 * depending on its state, but it's easier to just
324 * make the task ready and allow run() or the event
325 * loop to deal with shutting down and termination.
327 task->state = task_state_ready;
/*
 * Public detach: drop the caller's reference to *taskp.  Calls
 * task_detach() under the task lock; if that made the task ready,
 * the (elided) tail of this function presumably calls task_ready()
 * and NULLs *taskp.
 */
335 isc_task_detach(isc_task_t **taskp) {
337 isc_boolean_t was_idle;
340 * Detach *taskp from its task.
343 REQUIRE(taskp != NULL);
345 REQUIRE(VALID_TASK(task));
347 XTRACE("isc_task_detach");
350 was_idle = task_detach(task);
/*
 * Queue *eventp on 'task' (caller holds the task lock) and take
 * ownership of the event (the *eventp = NULL line is elided here —
 * TODO confirm).  An idle task becomes ready; returns whether it was
 * idle so the caller can invoke task_ready() after unlocking.
 */
359 static inline isc_boolean_t
360 task_send(isc_task_t *task, isc_event_t **eventp) {
361 isc_boolean_t was_idle = ISC_FALSE;
365 * Caller must be holding the task lock.
368 REQUIRE(eventp != NULL);
370 REQUIRE(event != NULL);
371 REQUIRE(event->ev_type > 0);
372 REQUIRE(task->state != task_state_done);
376 if (task->state == task_state_idle) {
378 INSIST(EMPTY(task->events));
379 task->state = task_state_ready;
381 INSIST(task->state == task_state_ready ||
382 task->state == task_state_running);
383 ENQUEUE(task->events, event, ev_link);
/*
 * Public send: deliver '*eventp' to 'task'.  The enqueue happens under
 * the task lock (task_send); if the task was idle it is added to the
 * manager's ready queue afterwards, with no locks held, to avoid
 * holding task + manager locks simultaneously (deadlock avoidance,
 * explained in the comment below).  The task_ready() call itself is
 * elided from this listing.
 */
390 isc_task_send(isc_task_t *task, isc_event_t **eventp) {
391 isc_boolean_t was_idle;
394 * Send '*event' to 'task'.
397 REQUIRE(VALID_TASK(task));
399 XTRACE("isc_task_send");
402 * We're trying hard to hold locks for as short a time as possible.
403 * We're also trying to hold as few locks as possible. This is why
404 * some processing is deferred until after the lock is released.
407 was_idle = task_send(task, eventp);
412 * We need to add this task to the ready queue.
414 * We've waited until now to do it because making a task
415 * ready requires locking the manager. If we tried to do
416 * this while holding the task lock, we could deadlock.
418 * We've changed the state to ready, so no one else will
419 * be trying to add this task to the ready queue. The
420 * only way to leave the ready state is by executing the
421 * task. It thus doesn't matter if events are added,
422 * removed, or a shutdown is started in the interval
423 * between the time we released the task lock, and the time
424 * we add the task to the ready queue.
/*
 * Combined send + detach under a single task-lock acquisition: queue
 * '*eventp' on '*taskp', then drop the caller's reference.  At most
 * one of the two operations can have found the task idle (INSIST
 * below), so the (elided) tail needs to call task_ready() at most once.
 */
431 isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
432 isc_boolean_t idle1, idle2;
436 * Send '*event' to '*taskp' and then detach '*taskp' from its
440 REQUIRE(taskp != NULL);
442 REQUIRE(VALID_TASK(task));
444 XTRACE("isc_task_sendanddetach");
447 idle1 = task_send(task, eventp);
448 idle2 = task_detach(task);
452 * If idle1, then idle2 shouldn't be true as well since we're holding
453 * the task lock, and thus the task cannot switch from ready back to
456 INSIST(!(idle1 && idle2));
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
464 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
/*
 * Shared worker for purge/unsend: move every event on task->events that
 * matches sender (NULL = any), type range [first, last], and tag
 * (NULL = any) onto '*events'.  When 'purging', events flagged
 * ISC_EVENTATTR_NOPURGE are left in place.  Returns the match count
 * (the count++ and return lines are elided from this listing).
 * The queue walk presumably runs under the task lock — the LOCK/UNLOCK
 * lines are not visible here.
 */
467 dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
468 isc_eventtype_t last, void *tag,
469 isc_eventlist_t *events, isc_boolean_t purging)
471 isc_event_t *event, *next_event;
472 unsigned int count = 0;
474 REQUIRE(VALID_TASK(task));
475 REQUIRE(last >= first);
477 XTRACE("dequeue_events");
480 * Events matching 'sender', whose type is >= first and <= last, and
481 * whose tag is 'tag' will be dequeued. If 'purging', matching events
482 * which are marked as unpurgable will not be dequeued.
484 * sender == NULL means "any sender", and tag == NULL means "any tag".
/* Save next before unlinking: DEQUEUE invalidates event's links. */
489 for (event = HEAD(task->events); event != NULL; event = next_event) {
490 next_event = NEXT(event, ev_link);
491 if (event->ev_type >= first && event->ev_type <= last &&
492 (sender == NULL || event->ev_sender == sender) &&
493 (tag == NULL || event->ev_tag == tag) &&
494 (!purging || PURGE_OK(event)) {
495 DEQUEUE(task->events, event, ev_link);
496 ENQUEUE(*events, event, ev_link);
/*
 * Purge matching events from 'task': dequeue them (purging mode, so
 * ISC_EVENTATTR_NOPURGE events survive) and free each one.  Returns
 * the number purged (return line elided).  Purging never changes the
 * task's state.
 */
507 isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
508 isc_eventtype_t last, void *tag)
511 isc_eventlist_t events;
512 isc_event_t *event, *next_event;
515 * Purge events from a task's event queue.
518 XTRACE("isc_task_purgerange");
520 ISC_LIST_INIT(events);
522 count = dequeue_events(task, sender, first, last, tag, &events,
/* Free the dequeued events outside any lock. */
525 for (event = HEAD(events); event != NULL; event = next_event) {
526 next_event = NEXT(event, ev_link);
527 isc_event_free(&event);
531 * Note that purging never changes the state of the task.
/*
 * Convenience wrapper: purge events of exactly one type by delegating
 * to isc_task_purgerange with first == last == type.
 */
538 isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
542 * Purge events from a task's event queue.
545 XTRACE("isc_task_purge");
547 return (isc_task_purgerange(task, sender, type, type, tag));
/*
 * Purge one specific event pointer from 'task''s queue, if present and
 * purgeable.  'event' may even be a dangling/invalid pointer — it is
 * only compared, never dereferenced, until a queue entry matches it.
 * Returns presumably ISC_TRUE/ISC_FALSE for purged / not found (the
 * return lines are elided from this listing).
 */
551 isc_task_purgeevent(isc_task_t *task, isc_event_t *event) {
552 isc_event_t *curr_event, *next_event;
555 * Purge 'event' from a task's event queue.
557 * XXXRTH: WARNING: This method may be removed before beta.
560 REQUIRE(VALID_TASK(task));
563 * If 'event' is on the task's event queue, it will be purged,
564 * unless it is marked as unpurgeable. 'event' does not have to be
565 * on the task's event queue; in fact, it can even be an invalid
566 * pointer. Purging only occurs if the event is actually on the task's
569 * Purging never changes the state of the task.
573 for (curr_event = HEAD(task->events);
575 curr_event = next_event) {
576 next_event = NEXT(curr_event, ev_link);
577 if (curr_event == event && PURGE_OK(event)) {
578 DEQUEUE(task->events, curr_event, ev_link);
584 if (curr_event == NULL)
587 isc_event_free(&curr_event);
/*
 * Remove matching events from 'task''s queue onto the caller-supplied
 * 'events' list WITHOUT freeing them (non-purging mode: the NOPURGE
 * attribute is ignored, unlike isc_task_purgerange).  Returns the
 * count from dequeue_events; the ISC_FALSE 'purging' argument is on
 * an elided continuation line.
 */
593 isc_task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
594 isc_eventtype_t last, void *tag,
595 isc_eventlist_t *events)
598 * Remove events from a task's event queue.
601 XTRACE("isc_task_unsendrange");
603 return (dequeue_events(task, sender, first, last, tag, events,
/*
 * Convenience wrapper: unsend events of exactly one type
 * (first == last == type).
 */
608 isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
609 void *tag, isc_eventlist_t *events)
612 * Remove events from a task's event queue.
615 XTRACE("isc_task_unsend");
617 return (dequeue_events(task, sender, type, type, tag, events,
/*
 * Register a shutdown callback: allocate an ISC_TASKEVENT_SHUTDOWN
 * event with 'action'/'arg' and queue it on task->on_shutdown (posted
 * LIFO by task_shutdown()).  If the task is already shutting down the
 * event is freed and ISC_R_SHUTTINGDOWN is returned instead.
 * NOTE(review): the LOCK/UNLOCK around the queue update and the final
 * return are elided from this listing.
 */
622 isc_task_onshutdown(isc_task_t *task, isc_taskaction_t action, const void *arg)
624 isc_boolean_t disallowed = ISC_FALSE;
625 isc_result_t result = ISC_R_SUCCESS;
629 * Send a shutdown event with action 'action' and argument 'arg' when
630 * 'task' is shutdown.
633 REQUIRE(VALID_TASK(task));
634 REQUIRE(action != NULL);
636 event = isc_event_allocate(task->manager->mctx,
638 ISC_TASKEVENT_SHUTDOWN,
643 return (ISC_R_NOMEMORY);
646 if (TASK_SHUTTINGDOWN(task)) {
647 disallowed = ISC_TRUE;
648 result = ISC_R_SHUTTINGDOWN;
650 ENQUEUE(task->on_shutdown, event, ev_link);
/* Too late to register: give the unused event back to the mctx. */
654 isc_mem_put(task->manager->mctx, event, sizeof(*event));
/*
 * Public shutdown: run task_shutdown() under the task lock; if the
 * task was idle the (elided) tail presumably calls task_ready().
 */
660 isc_task_shutdown(isc_task_t *task) {
661 isc_boolean_t was_idle;
667 REQUIRE(VALID_TASK(task));
670 was_idle = task_shutdown(task);
/*
 * Destroy = shutdown + detach of the caller's reference; the task
 * is actually freed later by task_finished() once it reaches
 * task_state_done.
 */
678 isc_task_destroy(isc_task_t **taskp) {
684 REQUIRE(taskp != NULL);
686 isc_task_shutdown(*taskp);
687 isc_task_detach(taskp);
/*
 * Set the task's debug name (zero-padded, always NUL-terminated via
 * the memset + size-1 strncpy) and its tag.  The tag assignment and
 * locking lines are elided from this listing.
 */
691 isc_task_setname(isc_task_t *task, const char *name, void *tag) {
697 REQUIRE(VALID_TASK(task));
699 #ifdef ISC_TASK_NAMES
701 memset(task->name, 0, sizeof(task->name));
702 strncpy(task->name, name, sizeof(task->name) - 1);
/* Trivial accessors; their bodies are elided from this listing. */
713 isc_task_getname(isc_task_t *task) {
718 isc_task_gettag(isc_task_t *task) {
723 isc_task_getcurrenttime(isc_task_t *task, isc_stdtime_t *t) {
724 REQUIRE(VALID_TASK(task));
/*
 * Core event loop, shared by worker threads (via run()) and the
 * threadless isc__taskmgr_dispatch() path.  Repeatedly: take a ready
 * task off the manager queue, mark it running, and dispatch up to
 * task->quantum events from it with no manager lock held; then decide
 * whether the task is done, idle, or must be requeued.  Threadless
 * builds additionally cap total events per call at
 * DEFAULT_TASKMGR_QUANTUM and batch requeues through a local list.
 * NOTE(review): many lines (inner braces, the per-event free, the
 * 'done/requeue/finished' plumbing) are elided from this listing.
 */
738 dispatch(isc_taskmgr_t *manager) {
740 #ifndef ISC_PLATFORM_USETHREADS
741 unsigned int total_dispatch_count = 0;
742 isc_tasklist_t ready_tasks;
743 #endif /* ISC_PLATFORM_USETHREADS */
745 REQUIRE(VALID_MANAGER(manager));
748 * Again we're trying to hold the lock for as short a time as possible
749 * and to do as little locking and unlocking as possible.
751 * In both while loops, the appropriate lock must be held before the
752 * while body starts. Code which acquired the lock at the top of
753 * the loop would be more readable, but would result in a lot of
754 * extra locking. Compare:
761 * while (expression) {
766 * Unlocked part here...
773 * Note how if the loop continues we unlock and then immediately lock.
774 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
775 * unlocks. Also note that the lock is not held when the while
776 * condition is tested, which may or may not be important, depending
782 * while (expression) {
786 * Unlocked part here...
793 * For N iterations of the loop, this code does N+1 locks and N+1
794 * unlocks. The while expression is always protected by the lock.
797 #ifndef ISC_PLATFORM_USETHREADS
798 ISC_LIST_INIT(ready_tasks);
/* Manager lock is held across the while test (see comment above). */
800 LOCK(&manager->lock);
801 while (!FINISHED(manager)) {
802 #ifdef ISC_PLATFORM_USETHREADS
804 * For reasons similar to those given in the comment in
805 * isc_task_send() above, it is safe for us to dequeue
806 * the task while only holding the manager lock, and then
807 * change the task to running state while only holding the
/*
 * Sleep while there is no work or an exclusive request is
 * pending; WAIT() atomically releases and reacquires the
 * manager lock.
 */
810 while ((EMPTY(manager->ready_tasks) ||
811 manager->exclusive_requested) &&
814 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
816 ISC_MSG_WAIT, "wait"));
817 WAIT(&manager->work_available, &manager->lock);
818 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
820 ISC_MSG_AWAKE, "awake"));
822 #else /* ISC_PLATFORM_USETHREADS */
/* Threadless mode never blocks: bail when quantum spent or idle. */
823 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
824 EMPTY(manager->ready_tasks))
826 #endif /* ISC_PLATFORM_USETHREADS */
827 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
828 ISC_MSG_WORKING, "working"));
830 task = HEAD(manager->ready_tasks);
832 unsigned int dispatch_count = 0;
833 isc_boolean_t done = ISC_FALSE;
834 isc_boolean_t requeue = ISC_FALSE;
835 isc_boolean_t finished = ISC_FALSE;
838 INSIST(VALID_TASK(task));
841 * Note we only unlock the manager lock if we actually
842 * have a task to do. We must reacquire the manager
843 * lock before exiting the 'if (task != NULL)' block.
845 DEQUEUE(manager->ready_tasks, task, ready_link);
846 manager->tasks_running++;
847 UNLOCK(&manager->lock);
850 INSIST(task->state == task_state_ready);
851 task->state = task_state_running;
852 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
853 ISC_MSG_RUNNING, "running"));
/* Snapshot "now" for isc_task_getcurrenttime() consumers. */
854 isc_stdtime_get(&task->now);
856 if (!EMPTY(task->events)) {
857 event = HEAD(task->events);
858 DEQUEUE(task->events, event, ev_link);
861 * Execute the event action.
863 XTRACE(isc_msgcat_get(isc_msgcat,
/* Action runs with NO locks held; it may send/purge/shutdown. */
867 if (event->ev_action != NULL) {
869 (event->ev_action)(task,event);
873 #ifndef ISC_PLATFORM_USETHREADS
874 total_dispatch_count++;
875 #endif /* ISC_PLATFORM_USETHREADS */
878 if (task->references == 0 &&
879 EMPTY(task->events) &&
880 !TASK_SHUTTINGDOWN(task)) {
881 isc_boolean_t was_idle;
884 * There are no references and no
885 * pending events for this task,
886 * which means it will not become
887 * runnable again via an external
888 * action (such as sending an event
891 * We initiate shutdown to prevent
892 * it from becoming a zombie.
894 * We do this here instead of in
895 * the "if EMPTY(task->events)" block
898 * If we post no shutdown events,
899 * we want the task to finish.
901 * If we did post shutdown events,
902 * will still want the task's
903 * quantum to be applied.
905 was_idle = task_shutdown(task);
909 if (EMPTY(task->events)) {
911 * Nothing else to do for this task
914 XTRACE(isc_msgcat_get(isc_msgcat,
918 if (task->references == 0 &&
919 TASK_SHUTTINGDOWN(task)) {
923 XTRACE(isc_msgcat_get(
/* done (unreferenced + shutting down) vs. merely idle. */
929 task->state = task_state_done;
931 task->state = task_state_idle;
933 } else if (dispatch_count >= task->quantum) {
935 * Our quantum has expired, but
936 * there is more work to be done.
937 * We'll requeue it to the ready
940 * We don't check quantum until
941 * dispatching at least one event,
942 * so the minimum quantum is one.
944 XTRACE(isc_msgcat_get(isc_msgcat,
948 task->state = task_state_ready;
958 LOCK(&manager->lock);
959 manager->tasks_running--;
960 #ifdef ISC_PLATFORM_USETHREADS
/*
 * tasks_running == 1 here means only the exclusive
 * requester remains running: grant it exclusivity.
 */
961 if (manager->exclusive_requested &&
962 manager->tasks_running == 1) {
963 SIGNAL(&manager->exclusive_granted);
965 #endif /* ISC_PLATFORM_USETHREADS */
968 * We know we're awake, so we don't have
969 * to wakeup any sleeping threads if the
970 * ready queue is empty before we requeue.
972 * A possible optimization if the queue is
973 * empty is to 'goto' the 'if (task != NULL)'
974 * block, avoiding the ENQUEUE of the task
975 * and the subsequent immediate DEQUEUE
976 * (since it is the only executable task).
977 * We don't do this because then we'd be
978 * skipping the exit_requested check. The
979 * cost of ENQUEUE is low anyway, especially
980 * when you consider that we'd have to do
981 * an extra EMPTY check to see if we could
982 * do the optimization. If the ready queue
983 * were usually nonempty, the 'optimization'
984 * might even hurt rather than help.
986 #ifdef ISC_PLATFORM_USETHREADS
987 ENQUEUE(manager->ready_tasks, task,
990 ENQUEUE(ready_tasks, task, ready_link);
/* Threadless: push locally-deferred requeues back in one splice. */
995 #ifndef ISC_PLATFORM_USETHREADS
996 ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
998 UNLOCK(&manager->lock);
/*
 * Worker-thread entry point (threaded builds only): log start, run the
 * dispatch() loop until the manager finishes, log exit.  The dispatch()
 * call itself and the function header line are elided from this
 * listing.
 */
1001 #ifdef ISC_PLATFORM_USETHREADS
1002 static isc_threadresult_t
1007 isc_taskmgr_t *manager = uap;
1009 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1010 ISC_MSG_STARTING, "starting"));
1014 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1015 ISC_MSG_EXITING, "exiting"));
1017 return ((isc_threadresult_t)0);
1019 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Release all manager resources: condition variables and thread array
 * (threaded builds), the manager lock, then the manager itself — the
 * mctx handle is copied first because isc_mem_put frees the structure
 * that holds it, and the mctx reference is detached last.
 */
1022 manager_free(isc_taskmgr_t *manager) {
1025 #ifdef ISC_PLATFORM_USETHREADS
1026 (void)isc_condition_destroy(&manager->exclusive_granted);
1027 (void)isc_condition_destroy(&manager->work_available);
1028 isc_mem_free(manager->mctx, manager->threads);
1029 #endif /* ISC_PLATFORM_USETHREADS */
1030 DESTROYLOCK(&manager->lock);
1032 mctx = manager->mctx;
1033 isc_mem_put(mctx, manager, sizeof(*manager));
1034 isc_mem_detach(&mctx);
/*
 * Create a task manager with 'workers' threads (threaded builds) and
 * the given default per-task quantum (0 selects
 * DEFAULT_DEFAULT_QUANTUM).  Threadless builds reuse the global
 * 'taskmgr' singleton instead, presumably with reference counting —
 * the refs++ line is elided from this listing.  Cleanup on failure
 * unwinds via the labelled blocks at the bottom (goto-cleanup
 * pattern; some goto/label lines are elided).
 */
1038 isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1039 unsigned int default_quantum, isc_taskmgr_t **managerp)
1041 isc_result_t result;
1042 unsigned int i, started = 0;
1043 isc_taskmgr_t *manager;
1046 * Create a new task manager.
1049 REQUIRE(workers > 0);
1050 REQUIRE(managerp != NULL && *managerp == NULL);
1052 #ifndef ISC_PLATFORM_USETHREADS
/* Threadless: hand back the existing singleton if there is one. */
1057 if (taskmgr != NULL) {
1059 *managerp = taskmgr;
1060 return (ISC_R_SUCCESS);
1062 #endif /* ISC_PLATFORM_USETHREADS */
1064 manager = isc_mem_get(mctx, sizeof(*manager));
1065 if (manager == NULL)
1066 return (ISC_R_NOMEMORY);
1067 manager->magic = TASK_MANAGER_MAGIC;
1068 manager->mctx = NULL;
1069 if (isc_mutex_init(&manager->lock) != ISC_R_SUCCESS) {
1070 UNEXPECTED_ERROR(__FILE__, __LINE__,
1071 "isc_mutex_init() %s",
1072 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1073 ISC_MSG_FAILED, "failed"));
1074 result = ISC_R_UNEXPECTED;
1077 #ifdef ISC_PLATFORM_USETHREADS
1078 manager->workers = 0;
1079 manager->threads = isc_mem_allocate(mctx,
1080 workers * sizeof(isc_thread_t));
1081 if (manager->threads == NULL) {
1082 result = ISC_R_NOMEMORY;
1085 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1086 UNEXPECTED_ERROR(__FILE__, __LINE__,
1087 "isc_condition_init() %s",
1088 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1089 ISC_MSG_FAILED, "failed"));
1090 result = ISC_R_UNEXPECTED;
1091 goto cleanup_threads;
1093 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1094 UNEXPECTED_ERROR(__FILE__, __LINE__,
1095 "isc_condition_init() %s",
1096 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1097 ISC_MSG_FAILED, "failed"));
1098 result = ISC_R_UNEXPECTED;
1099 goto cleanup_workavailable;
1101 #endif /* ISC_PLATFORM_USETHREADS */
1102 if (default_quantum == 0)
1103 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1104 manager->default_quantum = default_quantum;
1105 INIT_LIST(manager->tasks);
1106 INIT_LIST(manager->ready_tasks);
1107 manager->tasks_running = 0;
1108 manager->exclusive_requested = ISC_FALSE;
1109 manager->exiting = ISC_FALSE;
1111 isc_mem_attach(mctx, &manager->mctx);
1113 #ifdef ISC_PLATFORM_USETHREADS
/* Start workers under the lock so none runs before setup finishes. */
1114 LOCK(&manager->lock);
1118 for (i = 0; i < workers; i++) {
1119 if (isc_thread_create(run, manager,
1120 &manager->threads[manager->workers]) ==
1126 UNLOCK(&manager->lock);
/* No threads could be started: tear everything down. */
1129 manager_free(manager);
1130 return (ISC_R_NOTHREADS);
1132 isc_thread_setconcurrency(workers);
1133 #else /* ISC_PLATFORM_USETHREADS */
1136 #endif /* ISC_PLATFORM_USETHREADS */
1138 *managerp = manager;
1140 return (ISC_R_SUCCESS);
/* Failure unwinding, innermost resource first. */
1142 #ifdef ISC_PLATFORM_USETHREADS
1143 cleanup_workavailable:
1144 (void)isc_condition_destroy(&manager->work_available);
1146 isc_mem_free(mctx, manager->threads);
1148 DESTROYLOCK(&manager->lock);
1151 isc_mem_put(mctx, manager, sizeof(*manager));
/*
 * Destroy '*managerp': mark the manager exiting, post shutdown events
 * to every task, then either join all worker threads (threaded builds)
 * or drain the ready queue inline (threadless), and free the manager.
 * Must be called exactly once, by a non-worker thread.  In threadless
 * builds a remaining singleton reference just decrements and returns
 * (the refs-- line is elided from this listing).
 */
1156 isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
1157 isc_taskmgr_t *manager;
1162 * Destroy '*managerp'.
1165 REQUIRE(managerp != NULL);
1166 manager = *managerp;
1167 REQUIRE(VALID_MANAGER(manager));
1169 #ifndef ISC_PLATFORM_USETHREADS
1172 if (manager->refs > 1) {
1177 #endif /* ISC_PLATFORM_USETHREADS */
1179 XTHREADTRACE("isc_taskmgr_destroy");
1181 * Only one non-worker thread may ever call this routine.
1182 * If a worker thread wants to initiate shutdown of the
1183 * task manager, it should ask some non-worker thread to call
1184 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1185 * that the startup thread is sleeping on.
1189 * Unlike elsewhere, we're going to hold this lock a long time.
1190 * We need to do so, because otherwise the list of tasks could
1191 * change while we were traversing it.
1193 * This is also the only function where we will hold both the
1194 * task manager lock and a task lock at the same time.
1197 LOCK(&manager->lock);
1200 * Make sure we only get called once.
1202 INSIST(!manager->exiting);
1203 manager->exiting = ISC_TRUE;
1206 * Post shutdown event(s) to every task (if they haven't already been
/* Task lock is taken inside the manager lock here — see note above. */
1209 for (task = HEAD(manager->tasks);
1211 task = NEXT(task, link)) {
1213 if (task_shutdown(task))
1214 ENQUEUE(manager->ready_tasks, task, ready_link);
1215 UNLOCK(&task->lock);
1217 #ifdef ISC_PLATFORM_USETHREADS
1219 * Wake up any sleeping workers. This ensures we get work done if
1220 * there's work left to do, and if there are already no tasks left
1221 * it will cause the workers to see manager->exiting.
1223 BROADCAST(&manager->work_available);
1224 UNLOCK(&manager->lock);
1227 * Wait for all the worker threads to exit.
1229 for (i = 0; i < manager->workers; i++)
1230 (void)isc_thread_join(manager->threads[i], NULL);
1231 #else /* ISC_PLATFORM_USETHREADS */
1233 * Dispatch the shutdown events.
1235 UNLOCK(&manager->lock);
1236 while (isc__taskmgr_ready())
1237 (void)isc__taskmgr_dispatch();
1238 INSIST(ISC_LIST_EMPTY(manager->tasks));
1239 #endif /* ISC_PLATFORM_USETHREADS */
1241 manager_free(manager);
/*
 * Threadless-build hooks for an external event loop:
 * isc__taskmgr_ready() reports whether the singleton manager has
 * runnable tasks; isc__taskmgr_dispatch() runs one bounded dispatch()
 * pass (the dispatch(manager) call itself is on an elided line).
 */
1246 #ifndef ISC_PLATFORM_USETHREADS
1248 isc__taskmgr_ready(void) {
1249 if (taskmgr == NULL)
1251 return (ISC_TF(!ISC_LIST_EMPTY(taskmgr->ready_tasks)));
1255 isc__taskmgr_dispatch(void) {
1256 isc_taskmgr_t *manager = taskmgr;
1258 if (taskmgr == NULL)
1259 return (ISC_R_NOTFOUND);
1263 return (ISC_R_SUCCESS);
1266 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Request exclusive execution for the calling (running) task: set
 * exclusive_requested so dispatch() stops starting new tasks, then
 * wait on exclusive_granted until this task is the only one running
 * (tasks_running == 1).  Fails with ISC_R_LOCKBUSY if another task
 * already holds the exclusive request.  In threadless builds this is
 * trivially successful (that branch's body is elided here).
 */
1269 isc_task_beginexclusive(isc_task_t *task) {
1270 #ifdef ISC_PLATFORM_USETHREADS
1271 isc_taskmgr_t *manager = task->manager;
1272 REQUIRE(task->state == task_state_running);
1273 LOCK(&manager->lock);
1274 if (manager->exclusive_requested) {
1275 UNLOCK(&manager->lock);
1276 return (ISC_R_LOCKBUSY);
1278 manager->exclusive_requested = ISC_TRUE;
1279 while (manager->tasks_running > 1) {
1280 WAIT(&manager->exclusive_granted, &manager->lock);
1282 UNLOCK(&manager->lock);
1286 return (ISC_R_SUCCESS);
1290 isc_task_endexclusive(isc_task_t *task) {
1291 #ifdef ISC_PLATFORM_USETHREADS
1292 isc_taskmgr_t *manager = task->manager;
1293 REQUIRE(task->state == task_state_running);
1294 LOCK(&manager->lock);
1295 REQUIRE(manager->exclusive_requested);
1296 manager->exclusive_requested = ISC_FALSE;
1297 BROADCAST(&manager->work_available);
1298 UNLOCK(&manager->lock);