sys/kern: Adjust some function declaration vs. definition mismatches.
[dragonfly.git] sys/kern/subr_taskqueue.c
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.69 2012/08/28 13:35:37 jhb Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/thread2.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/serialize.h>
#include <sys/proc.h>
#include <machine/varargs.h>

MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct lock taskqueue_queues_lock;

struct taskqueue {
	STAILQ_ENTRY(taskqueue) tq_link;
	STAILQ_HEAD(, task) tq_queue;
	const char *tq_name;
	taskqueue_enqueue_fn tq_enqueue;
	void *tq_context;

	struct task *tq_running;
	struct spinlock tq_lock;
	struct thread **tq_threads;
	int tq_tcount;
	int tq_flags;
	int tq_callouts;
};

#define TQ_FLAGS_ACTIVE		(1 << 0)
#define TQ_FLAGS_BLOCKED	(1 << 1)
#define TQ_FLAGS_PENDING	(1 << 2)

#define DT_CALLOUT_ARMED	(1 << 0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{
	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init(&timeout_task->c);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static void taskqueue_run(struct taskqueue *queue, int lock_held);

static __inline void
TQ_LOCK_INIT(struct taskqueue *tq)
{
	spin_init(&tq->tq_lock, "tqlock");
}

static __inline void
TQ_LOCK_UNINIT(struct taskqueue *tq)
{
	spin_uninit(&tq->tq_lock);
}

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	spin_lock(&tq->tq_lock);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	spin_unlock(&tq->tq_lock);
}

static __inline void
TQ_SLEEP(struct taskqueue *tq, void *ident, const char *wmesg)
{
	ssleep(ident, &tq->tq_lock, 0, wmesg, 0);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = kmalloc(sizeof(*queue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;
	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	TQ_LOCK_INIT(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	return queue;
}
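
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * consumer creates a private queue serviced by its own kernel thread.
 * The softc pointer `sc' and its `sc_tq' member are hypothetical names:
 *
 *	sc->sc_tq = taskqueue_create("mydrv_taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
 *	    "mydrv taskq");
 *
 * Passing &sc->sc_tq as the enqueue context is what lets
 * taskqueue_thread_enqueue() locate the queue it must wake.
 */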

static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{
	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "taskqueue_terminate");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue, 1);
	taskqueue_terminate(queue->tq_threads, queue);
	TQ_UNLOCK(queue);

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);

	TQ_LOCK_UNINIT(queue);

	kfree(queue, M_TASKQUEUE);
}

struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	lockmgr(&taskqueue_queues_lock, LK_EXCLUSIVE);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (!strcmp(queue->tq_name, name)) {
			lockmgr(&taskqueue_queues_lock, LK_RELEASE);
			return queue;
		}
	}
	lockmgr(&taskqueue_queues_lock, LK_RELEASE);
	return NULL;
}

/*
 * NOTE! If using the per-cpu taskqueues ``taskqueue_thread[mycpuid]'',
 * be sure NOT TO SHARE the ``task'' between CPUs.  TASKS ARE NOT LOCKED.
 * So either use a throwaway task which will only be enqueued once, or
 * use one task per CPU!
 */
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		return EPIPE;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) {
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	} else {
		queue->tq_flags |= TQ_FLAGS_PENDING;
	}

	return 0;
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}
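
/*
 * Example (hedged sketch): initializing and queueing a task.  Per the
 * NOTE above taskqueue_enqueue_locked(), a task given to a per-cpu
 * ``taskqueue_thread[mycpuid]'' queue must not be shared between cpus.
 * The handler `my_handler' and softc `sc' are hypothetical:
 *
 *	TASK_INIT(&sc->sc_task, 0, my_handler, sc);
 *	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->sc_task);
 *
 * Re-queueing a still-pending task only bumps ta_pending; the handler
 * later runs once and receives the accumulated count.
 */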

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		TQ_UNLOCK(queue);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
		}
		TQ_UNLOCK(queue);
		callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
		    timeout_task);
	}
	return (res);
}
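
/*
 * Example (hedged sketch, using names defined in this file): deferring
 * a task by `hz' ticks.  The timeout task and handler are hypothetical:
 *
 *	_timeout_task_init(tq, &sc->sc_tmo, 0, my_timeout_handler, sc);
 *	taskqueue_enqueue_timeout(tq, &sc->sc_tmo, hz);
 *
 * A ticks value of 0 degenerates to an immediate enqueue, and re-arming
 * while the callout is still pending merely increments the return count.
 */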

void
taskqueue_block(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{
	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		if (queue->tq_enqueue)
			queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
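
/*
 * Example (hedged sketch): blocking only suppresses the tq_enqueue hook;
 * it does not wait for a task that is already running.  A caller might
 * bracket a reconfiguration like this:
 *
 *	taskqueue_block(tq);
 *	(tasks may still be queued here, but the queue is not kicked)
 *	taskqueue_unblock(tq);
 *
 * taskqueue_unblock() re-fires tq_enqueue if TQ_FLAGS_PENDING was set
 * while the queue was blocked.
 */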

static void
taskqueue_run(struct taskqueue *queue, int lock_held)
{
	struct task *task;
	int pending;

	if (lock_held == 0)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;

		TQ_UNLOCK(queue);
		task->ta_func(task->ta_context, pending);
		queue->tq_running = NULL;
		wakeup(task);
		TQ_LOCK(queue);
	}
	if (lock_held == 0)
		TQ_UNLOCK(queue);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{
	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task == queue->tq_running ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
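
/*
 * Example (hedged sketch): taskqueue_cancel() returns EBUSY when the
 * task is currently executing.  Callers needing a hard guarantee
 * typically fall back to a drain:
 *
 *	if (taskqueue_cancel(tq, &sc->sc_task, NULL) != 0)
 *		taskqueue_drain(tq, &sc->sc_task);
 *
 * After this sequence the task is neither pending nor running.
 */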

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	pending = !!callout_stop(&timeout_task->c);
	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task == queue->tq_running)
		TQ_SLEEP(queue, task, "-");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{
	callout_stop_sync(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}
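
/*
 * Example (hedged sketch): a full teardown stops any callout and drains
 * outstanding tasks before the queue itself is freed.  `sc' is again a
 * hypothetical softc:
 *
 *	taskqueue_drain_timeout(tq, &sc->sc_tmo);
 *	taskqueue_drain(tq, &sc->sc_task);
 *	taskqueue_free(tq);
 *
 * taskqueue_free() runs any remaining tasks and reaps the queue's
 * threads before releasing the memory.
 */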

static void
taskqueue_swi_enqueue(void *context)
{
	setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi, 0);
}

static void
taskqueue_swi_mp_run(void *arg, void *frame)
{
	taskqueue_run(taskqueue_swi_mp, 0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, int ncpu,
    const char *fmt, ...)
{
	__va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error, cpu;
	char ktname[MAXCOMLEN];

	if (count <= 0)
		return EINVAL;

	tq = *tqp;
	cpu = ncpu;

	__va_start(ap, fmt);
	kvsnprintf(ktname, MAXCOMLEN, fmt, ap);
	__va_end(ap);

	tq->tq_threads = kmalloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < count; i++) {
		/*
		 * If no specific cpu was specified and more than one thread
		 * is to be created, we distribute the threads amongst all
		 * cpus.
		 */
		if ((ncpu <= -1) && (count > 1))
			cpu = i % ncpus;

		if (count == 1) {
			error = lwkt_create(taskqueue_thread_loop, tqp,
			    &tq->tq_threads[i], NULL,
			    TDF_NOSTART, cpu,
			    "%s", ktname);
		} else {
			error = lwkt_create(taskqueue_thread_loop, tqp,
			    &tq->tq_threads[i], NULL,
			    TDF_NOSTART, cpu,
			    "%s_%d", ktname, i);
		}
		if (error) {
			kprintf("%s: lwkt_create(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;
		} else {
			td = tq->tq_threads[i];
			lwkt_setpri_initial(td, pri);
			lwkt_schedule(td);
			tq->tq_tcount++;
		}
	}

	return 0;
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq, 1);
		TQ_SLEEP(tq, tq, "tqthr");
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	TQ_UNLOCK(tq);
	wakeup_one(tq->tq_threads);
	lwkt_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
    register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL, -1));

/*
 * XXX: possibly use a different SWI_TQ_MP or so.
 * related: sys/interrupt.h
 * related: platform/XXX/isa/ipl_funcs.c
 */
TASKQUEUE_DEFINE(swi_mp, taskqueue_swi_enqueue, 0,
    register_swi_mp(SWI_TQ, taskqueue_swi_mp_run, NULL, "swi_mp_taskq", NULL,
    -1));

struct taskqueue *taskqueue_thread[MAXCPU];

static void
taskqueue_init(void)
{
	int cpu;

	lockinit(&taskqueue_queues_lock, "tqqueues", 0, 0);
	STAILQ_INIT(&taskqueue_queues);

	for (cpu = 0; cpu < ncpus; cpu++) {
		taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
		    taskqueue_thread_enqueue, &taskqueue_thread[cpu]);
		taskqueue_start_threads(&taskqueue_thread[cpu], 1,
		    TDPRI_KERN_DAEMON, cpu, "taskq_cpu %d", cpu);
	}
}

SYSINIT(taskqueueinit, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, taskqueue_init, NULL);