/*
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.69 2012/08/28 13:35:37 jhb Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>

#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/serialize.h>
MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct lock      taskqueue_queues_lock;
struct taskqueue {
        STAILQ_ENTRY(taskqueue) tq_link;
        STAILQ_HEAD(, task)     tq_queue;
        const char              *tq_name;
        /* NOTE: tq must be locked before calling tq_enqueue */
        taskqueue_enqueue_fn    tq_enqueue;
        void                    *tq_context;

        struct task             *tq_running;
        struct spinlock         tq_lock;
        struct thread           **tq_threads;
        int                     tq_tcount;
        int                     tq_flags;
        int                     tq_callouts;
};
#define TQ_FLAGS_ACTIVE         (1 << 0)
#define TQ_FLAGS_BLOCKED        (1 << 1)
#define TQ_FLAGS_PENDING        (1 << 2)

#define DT_CALLOUT_ARMED        (1 << 0)
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{
        TASK_INIT(&timeout_task->t, priority, func, context);
        callout_init(&timeout_task->c); /* XXX use callout_init_mp() */
        timeout_task->q = queue;
        timeout_task->f = 0;
}
static void taskqueue_run(struct taskqueue *queue, int lock_held);
static void
TQ_LOCK_INIT(struct taskqueue *tq)
{
        spin_init(&tq->tq_lock, "tqlock");
}

static void
TQ_LOCK_UNINIT(struct taskqueue *tq)
{
        spin_uninit(&tq->tq_lock);
}

static void
TQ_LOCK(struct taskqueue *tq)
{
        spin_lock(&tq->tq_lock);
}

static void
TQ_UNLOCK(struct taskqueue *tq)
{
        spin_unlock(&tq->tq_lock);
}

static void
TQ_SLEEP(struct taskqueue *tq, void *ident, const char *wmesg)
{
        ssleep(ident, &tq->tq_lock, 0, wmesg, 0);
}
struct taskqueue *
taskqueue_create(const char *name, int mflags,
                 taskqueue_enqueue_fn enqueue, void *context)
{
        struct taskqueue *queue;

        queue = kmalloc(sizeof(*queue), M_TASKQUEUE, mflags | M_ZERO);
        if (queue == NULL)
                return NULL;
        STAILQ_INIT(&queue->tq_queue);
        queue->tq_name = name;
        queue->tq_enqueue = enqueue;
        queue->tq_context = context;
        queue->tq_flags |= TQ_FLAGS_ACTIVE;
        TQ_LOCK_INIT(queue);

        lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
        STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
        lockmgr(&taskqueue_queues_lock, LK_RELEASE);

        return queue;
}
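/*
 * Illustrative sketch, not part of the original file: the common pattern is
 * taskqueue_create() paired with taskqueue_start_threads(), passing the
 * address of the queue pointer so taskqueue_thread_enqueue() can find it.
 * The softc pointer "sc", its field "sc_tq" and the name "footq" are
 * hypothetical.
 *
 *      sc->sc_tq = taskqueue_create("footq", M_WAITOK,
 *          taskqueue_thread_enqueue, &sc->sc_tq);
 *      taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1, "footq");
 */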
/* NOTE: tq must be locked */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{
        while (tq->tq_tcount > 0) {
                /* Unlock spinlock before wakeup() */
                TQ_UNLOCK(tq);
                wakeup(pp);
                TQ_LOCK(tq);
                TQ_SLEEP(tq, pp, "taskqueue_terminate");
        }
}
void
taskqueue_free(struct taskqueue *queue)
{
        TQ_LOCK(queue);
        queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
        taskqueue_run(queue, 1);
        taskqueue_terminate(queue->tq_threads, queue);
        TQ_UNLOCK(queue);

        lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
        STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
        lockmgr(&taskqueue_queues_lock, LK_RELEASE);

        TQ_LOCK_UNINIT(queue);

        kfree(queue, M_TASKQUEUE);
}
struct taskqueue *
taskqueue_find(const char *name)
{
        struct taskqueue *queue;

        lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
        STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
                if (!strcmp(queue->tq_name, name)) {
                        lockmgr(&taskqueue_queues_lock, LK_RELEASE);
                        return queue;
                }
        }
        lockmgr(&taskqueue_queues_lock, LK_RELEASE);
        return NULL;
}
/*
 * NOTE! If using the per-cpu taskqueues ``taskqueue_thread[mycpuid]'',
 * be sure NOT TO SHARE the ``task'' between CPUs.  TASKS ARE NOT LOCKED.
 * So either use a throwaway task which will only be enqueued once, or
 * use one task per CPU!
 */
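/*
 * Illustrative sketch, not part of the original file: one statically
 * allocated task per cpu, so no task is ever shared between cpus.  The
 * callback "foo_task_func" and its argument "foo_arg" are hypothetical.
 *
 *      static struct task foo_tasks[MAXCPU];
 *
 *      TASK_INIT(&foo_tasks[mycpuid], 0, foo_task_func, foo_arg);
 *      taskqueue_enqueue(taskqueue_thread[mycpuid], &foo_tasks[mycpuid]);
 */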
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
        struct task *ins;
        struct task *prev;

        /*
         * Don't allow new tasks on a queue which is being freed.
         */
        if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
                return EPIPE;

        /*
         * Count multiple enqueues.
         */
        if (task->ta_pending) {
                task->ta_pending++;
                return 0;
        }

        /*
         * Optimise the case when all tasks have the same priority.
         */
        prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
        if (!prev || prev->ta_priority >= task->ta_priority) {
                STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
        } else {
                prev = NULL;
                for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
                     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
                        if (ins->ta_priority < task->ta_priority)
                                break;

                if (prev)
                        STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
                            ta_link);
                else
                        STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
        }

        task->ta_pending = 1;
        if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) {
                if (queue->tq_enqueue)
                        queue->tq_enqueue(queue->tq_context);
        } else {
                queue->tq_flags |= TQ_FLAGS_PENDING;
        }

        return 0;
}
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
        int res;

        TQ_LOCK(queue);
        res = taskqueue_enqueue_locked(queue, task);
        TQ_UNLOCK(queue);

        return res;
}
static void
taskqueue_timeout_func(void *arg)
{
        struct taskqueue *queue;
        struct timeout_task *timeout_task;

        timeout_task = arg;
        queue = timeout_task->q;

        TQ_LOCK(queue);
        KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
        timeout_task->f &= ~DT_CALLOUT_ARMED;
        queue->tq_callouts--;
        taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
        TQ_UNLOCK(queue);
}
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
        int res;

        TQ_LOCK(queue);
        KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
            ("Migrated queue"));
        timeout_task->q = queue;
        res = timeout_task->t.ta_pending;
        if (ticks == 0) {
                taskqueue_enqueue_locked(queue, &timeout_task->t);
        } else {
                if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
                        res++;
                } else {
                        queue->tq_callouts++;
                        timeout_task->f |= DT_CALLOUT_ARMED;
                }
                callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
                    timeout_task);
        }
        TQ_UNLOCK(queue);
        return res;
}
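/*
 * Illustrative sketch, not part of the original file: deferring work by one
 * second using a timeout task.  The queue pointer "tq", the task
 * "foo_to_task", the callback "foo_timeout_func" and the argument "sc" are
 * hypothetical.
 *
 *      struct timeout_task foo_to_task;
 *
 *      TIMEOUT_TASK_INIT(tq, &foo_to_task, 0, foo_timeout_func, sc);
 *      taskqueue_enqueue_timeout(tq, &foo_to_task, hz);
 */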
void
taskqueue_block(struct taskqueue *queue)
{
        TQ_LOCK(queue);
        queue->tq_flags |= TQ_FLAGS_BLOCKED;
        TQ_UNLOCK(queue);
}
void
taskqueue_unblock(struct taskqueue *queue)
{
        TQ_LOCK(queue);
        queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
        if (queue->tq_flags & TQ_FLAGS_PENDING) {
                queue->tq_flags &= ~TQ_FLAGS_PENDING;
                if (queue->tq_enqueue)
                        queue->tq_enqueue(queue->tq_context);
        }
        TQ_UNLOCK(queue);
}
static void
taskqueue_run(struct taskqueue *queue, int lock_held)
{
        struct task *task;
        int pending;

        if (lock_held == 0)
                TQ_LOCK(queue);
        while (STAILQ_FIRST(&queue->tq_queue)) {
                /*
                 * Carefully remove the first task from the queue and
                 * zero its pending count.
                 */
                task = STAILQ_FIRST(&queue->tq_queue);
                STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
                pending = task->ta_pending;
                task->ta_pending = 0;
                queue->tq_running = task;

                TQ_UNLOCK(queue);
                task->ta_func(task->ta_context, pending);
                queue->tq_running = NULL;
                wakeup(task);
                TQ_LOCK(queue);
        }
        if (lock_held == 0)
                TQ_UNLOCK(queue);
}
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{
        if (task->ta_pending > 0)
                STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
        if (pendp != NULL)
                *pendp = task->ta_pending;
        task->ta_pending = 0;
        return (task == queue->tq_running ? EBUSY : 0);
}
int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
        int error;

        TQ_LOCK(queue);
        error = taskqueue_cancel_locked(queue, task, pendp);
        TQ_UNLOCK(queue);

        return error;
}
int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
        u_int pending, pending1;
        int error;

        TQ_LOCK(queue);
        pending = !!callout_stop(&timeout_task->c);
        error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
        if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
                timeout_task->f &= ~DT_CALLOUT_ARMED;
                queue->tq_callouts--;
        }
        TQ_UNLOCK(queue);

        if (pendp != NULL)
                *pendp = pending + pending1;
        return error;
}
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
        TQ_LOCK(queue);
        while (task->ta_pending != 0 || task == queue->tq_running)
                TQ_SLEEP(queue, task, "-");
        TQ_UNLOCK(queue);
}
void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{
        callout_stop_sync(&timeout_task->c);
        taskqueue_drain(queue, &timeout_task->t);
}
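/*
 * Illustrative sketch, not part of the original file: typical teardown
 * order is to cancel pending timeouts, drain anything still queued or
 * running, then free the queue.  "tq", "foo_to_task" and "foo_task" are
 * hypothetical.
 *
 *      taskqueue_cancel_timeout(tq, &foo_to_task, NULL);
 *      taskqueue_drain_timeout(tq, &foo_to_task);
 *      taskqueue_drain(tq, &foo_task);
 *      taskqueue_free(tq);
 */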
taskqueue_swi_enqueue(void *context)

static void
taskqueue_swi_run(void *arg, void *frame)
{
        taskqueue_run(taskqueue_swi, 0);
}

static void
taskqueue_swi_mp_run(void *arg, void *frame)
{
        taskqueue_run(taskqueue_swi_mp, 0);
}
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, int ncpu,
    const char *fmt, ...)
        struct taskqueue *tq;
        char ktname[MAXCOMLEN];

        /* catch call argument mistakes */
        KKASSERT(pri > 0 && pri < TDPRI_MAX);

        kvsnprintf(ktname, MAXCOMLEN, fmt, ap);

        tq->tq_threads = kmalloc(sizeof(struct thread *) * count, M_TASKQUEUE,

        for (i = 0; i < count; i++) {
                /*
                 * If no specific cpu was specified and more than one thread
                 * is to be created, we distribute the threads amongst all
                 * available cpus.
                 */
                if ((ncpu <= -1) && (count > 1))

                        error = lwkt_create(taskqueue_thread_loop, tqp,
                            &tq->tq_threads[i], NULL,

                        error = lwkt_create(taskqueue_thread_loop, tqp,
                            &tq->tq_threads[i], NULL,

                        kprintf("%s: lwkt_create(%s): error %d", __func__,
                        tq->tq_threads[i] = NULL;

                td = tq->tq_threads[i];
                lwkt_setpri_initial(td, pri);
static void
taskqueue_thread_loop(void *arg)
{
        struct taskqueue **tqp, *tq;

        tqp = arg;
        tq = *tqp;
        TQ_LOCK(tq);
        while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
                taskqueue_run(tq, 1);
                TQ_SLEEP(tq, tq, "tqthr");
        }

        /* rendezvous with thread that asked us to terminate */
        tq->tq_tcount--;
        TQ_UNLOCK(tq);
        wakeup_one(tq->tq_threads);
        lwkt_exit();
}
/* NOTE: tq must be locked */
static void
taskqueue_thread_enqueue(void *context)
{
        struct taskqueue **tqp, *tq;

        tqp = context;
        tq = *tqp;

        /* Unlock spinlock before wakeup_one() */
        TQ_UNLOCK(tq);
        wakeup_one(tq);
        TQ_LOCK(tq);
}
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
    register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL, -1));
/*
 * XXX: possibly use a different SWI_TQ_MP or so.
 * related: sys/interrupt.h
 * related: platform/XXX/isa/ipl_funcs.c
 */
TASKQUEUE_DEFINE(swi_mp, taskqueue_swi_enqueue, 0,
    register_swi_mp(SWI_TQ, taskqueue_swi_mp_run, NULL, "swi_mp_taskq", NULL,
                    -1));
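/*
 * Illustrative sketch, not part of the original file: enqueueing onto the
 * software-interrupt queue defined above.  "foo_intr_task", "foo_intr_func"
 * and "sc" are hypothetical.
 *
 *      TASK_INIT(&foo_intr_task, 0, foo_intr_func, sc);
 *      taskqueue_enqueue(taskqueue_swi, &foo_intr_task);
 */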
struct taskqueue *taskqueue_thread[MAXCPU];

static void
taskqueue_init(void *arg)
{
        int cpu;

        lockinit(&taskqueue_queues_lock, "tqqueues", 0, 0);
        STAILQ_INIT(&taskqueue_queues);

        for (cpu = 0; cpu < ncpus; cpu++) {
                taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
                    taskqueue_thread_enqueue, &taskqueue_thread[cpu]);
                taskqueue_start_threads(&taskqueue_thread[cpu], 1,
                    TDPRI_KERN_DAEMON, cpu, "taskq_cpu %d", cpu);
        }
}

SYSINIT(taskqueueinit, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, taskqueue_init, NULL);