/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_taskqueue.c,v 1.1.2.3 2003/09/10 00:40:39 ken Exp $
 * $DragonFly: src/sys/kern/subr_taskqueue.c,v 1.10 2006/09/05 00:55:45 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/thread2.h>

#include <machine/ipl.h>

MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;

struct taskqueue {
        STAILQ_ENTRY(taskqueue) tq_link;
        STAILQ_HEAD(, task)     tq_queue;
        const char              *tq_name;
        taskqueue_enqueue_fn    tq_enqueue;
        void                    *tq_context;
        int                     tq_draining;
};
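
/*
 * Create a new taskqueue.  The queue is allocated with kmalloc() using the
 * caller-supplied mflags, linked into the global taskqueue_queues list and
 * returned; NULL is returned if the allocation fails.  The optional
 * 'enqueue' callback is invoked (with 'context') each time a new task is
 * queued, so the owner can kick whatever mechanism actually runs the queue
 * (a software interrupt, a kernel thread, etc.).
 */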
struct taskqueue *
taskqueue_create(const char *name, int mflags,
                 taskqueue_enqueue_fn enqueue, void *context)
{
        struct taskqueue *queue;
        static int once = 1;

        queue = kmalloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags);
        if (!queue)
                return 0;
        STAILQ_INIT(&queue->tq_queue);
        queue->tq_name = name;
        queue->tq_enqueue = enqueue;
        queue->tq_context = context;
        queue->tq_draining = 0;

        crit_enter();
        if (once) {
                STAILQ_INIT(&taskqueue_queues);
                once = 0;
        }
        STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
        crit_exit();

        return queue;
}
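
/*
 * Tear down a taskqueue.  The queue is marked as draining so that no new
 * tasks can be enqueued, any tasks still pending are run to completion via
 * taskqueue_run(), and the queue is then unlinked from taskqueue_queues and
 * freed.
 */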
void
taskqueue_free(struct taskqueue *queue)
{
        crit_enter();
        queue->tq_draining = 1;
        crit_exit();

        taskqueue_run(queue);

        crit_enter();
        STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
        crit_exit();

        kfree(queue, M_TASKQUEUE);
}
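
/*
 * Look up a taskqueue by name in the global list, returning a pointer to it
 * or NULL if no queue with that name has been created.
 */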
struct taskqueue *
taskqueue_find(const char *name)
{
        struct taskqueue *queue;

        crit_enter();
        STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
                if (!strcmp(queue->tq_name, name)) {
                        crit_exit();
                        return queue;
                }
        }
        crit_exit();
        return 0;
}
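
/*
 * Queue a task for execution.  If the task is already pending its pending
 * count is simply bumped; otherwise it is inserted into the queue in
 * descending ta_priority order and the queue's enqueue callback is called
 * to schedule a run.  EPIPE is returned if the queue is being torn down.
 *
 * Illustrative sketch (not part of this file, hypothetical names): a
 * typical consumer initializes the task once and enqueues it later, e.g.
 * from an interrupt handler:
 *
 *      static struct task mytask;
 *      TASK_INIT(&mytask, 0, myfunc, mysoftc);
 *      ...
 *      taskqueue_enqueue(taskqueue_swi, &mytask);
 *
 * myfunc() is then called later as myfunc(mysoftc, pending) when the
 * software-interrupt taskqueue runs.
 */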
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
        struct task *ins;
        struct task *prev;

        crit_enter();

        /*
         * Don't allow new tasks on a queue which is being freed.
         */
        if (queue->tq_draining) {
                crit_exit();
                return EPIPE;
        }

        /*
         * Count multiple enqueues.
         */
        if (task->ta_pending) {
                task->ta_pending++;
                crit_exit();
                return 0;
        }

        /*
         * Optimise the case when all tasks have the same priority.
         */
        prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
        if (!prev || prev->ta_priority >= task->ta_priority) {
                STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
        } else {
                prev = 0;
                for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
                     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
                        if (ins->ta_priority < task->ta_priority)
                                break;

                if (prev)
                        STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
                else
                        STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
        }

        task->ta_pending = 1;
        if (queue->tq_enqueue)
                queue->tq_enqueue(queue->tq_context);

        crit_exit();

        return 0;
}
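
/*
 * Run all tasks currently queued.  Each task is removed from the queue and
 * its pending count captured and cleared inside a critical section; the
 * handler itself is then called outside the critical section with that
 * pending count, so it can safely re-enqueue the task.
 */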
void
taskqueue_run(struct taskqueue *queue)
{
        struct task *task;
        int pending;

        crit_enter();
        while (STAILQ_FIRST(&queue->tq_queue)) {
                /*
                 * Carefully remove the first task from the queue and
                 * zero its pending count.
                 */
                task = STAILQ_FIRST(&queue->tq_queue);
                STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
                pending = task->ta_pending;
                task->ta_pending = 0;
                crit_exit();

                task->ta_func(task->ta_context, pending);

                crit_enter();
        }
        crit_exit();
}

static void
taskqueue_swi_enqueue(void *context)
{
        setsofttq();
}

static void
taskqueue_swi_run(void *arg, void *frame)
{
        taskqueue_run(taskqueue_swi);
}
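
/*
 * The standard software-interrupt taskqueue.  TASKQUEUE_DEFINE() (see
 * <sys/taskqueue.h>) instantiates the global 'taskqueue_swi' queue and runs
 * the register_swi() expression below at SYSINIT time, installing
 * taskqueue_swi_run() as the SWI_TQ handler; taskqueue_swi_enqueue() then
 * schedules that software interrupt whenever a task is queued.
 */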
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
                 register_swi(SWI_TQ, taskqueue_swi_run, NULL, "swi_taskq", NULL));
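
/*
 * Service loop for the per-cpu thread taskqueues.  Each kernel thread runs
 * its cpu's queue, then sleeps until taskqueue_thread_enqueue() wakes it up
 * when new work arrives.
 */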
static void
taskqueue_kthread(void *arg)
{
        for (;;) {
                taskqueue_run(taskqueue_thread[mycpuid]);
                crit_enter();
                if (STAILQ_EMPTY(&taskqueue_thread[mycpuid]->tq_queue))
                        tsleep(taskqueue_thread[mycpuid], 0, "tqthr", 0);
                crit_exit();
        }
}

static void
taskqueue_thread_enqueue(void *context)
{
        wakeup(taskqueue_thread[mycpuid]);
}

struct taskqueue *taskqueue_thread[MAXCPU];
static struct thread *taskqueue_thread_td[MAXCPU];
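
/*
 * Create the per-cpu thread taskqueues and their service threads at
 * SI_SUB_CONFIGURE time.  Code that wants its task handlers to run in
 * thread context rather than from a software interrupt would enqueue to
 * the current cpu's thread queue, for example (illustrative only,
 * 'mytask' as in the earlier sketch):
 *
 *      taskqueue_enqueue(taskqueue_thread[mycpuid], &mytask);
 */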
static void
taskqueue_init(void)
{
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                taskqueue_thread[cpu] = taskqueue_create("thread", M_INTWAIT,
                    taskqueue_thread_enqueue, NULL);
                kthread_create(taskqueue_kthread, NULL,
                    &taskqueue_thread_td[cpu], "taskqueue");
        }
}

SYSINIT(taskqueueinit, SI_SUB_CONFIGURE, SI_ORDER_SECOND, taskqueue_init, NULL);