2 * Copyright (c) 2020 François Tigeot <ftigeot@wolfpond.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <linux/interrupt.h>
28 #include <linux/slab.h>
30 #include <sys/kthread.h>
33 * Linux tasklet constraints:
34 * - tasklets that have the same type cannot be run on multiple processors at
36 * - tasklets always run on the processor from which they were originally
38 * - when a tasklet is scheduled, its state is set to TASKLET_STATE_SCHED,
39 * and the tasklet added to a queue
40 * - during the execution of its function, the tasklet state is set to
41 * TASKLET_STATE_RUN and the TASKLET_STATE_SCHED state is removed
/*
 * Queue node binding one Linux tasklet_struct to a runner list.
 * Entries are allocated in tasklet_schedule()/tasklet_hi_schedule() and
 * removed by the runner once the tasklet is marked TASKLET_IS_DYING.
 * NOTE(review): the closing "};" of this struct is elided in this extract.
 */
struct tasklet_entry {
	struct tasklet_struct *ts;	/* the tasklet tracked by this entry */
	SLIST_ENTRY(tasklet_entry) tasklet_entries;	/* linkage in tlist / tlist_hi */
/* Serializes all tasklet queue/state manipulation; recursion permitted. */
static struct lock tasklet_lock = LOCK_INITIALIZER("dltll", 0, LK_CANRECURSE);
/* The single runner kernel thread (XXX not per-CPU, see tasklet_runner). */
static struct thread *tasklet_td = NULL;
/* Normal-priority queue, drained after tlist_hi by tasklet_runner(). */
SLIST_HEAD(tasklet_list_head, tasklet_entry) tlist = SLIST_HEAD_INITIALIZER(tlist);
/* High-priority queue, drained first by tasklet_runner(). */
SLIST_HEAD(tasklet_hi_list_head, tasklet_entry) tlist_hi = SLIST_HEAD_INITIALIZER(tlist_hi);
/*
 * Nonzero when a _schedule() call raced the runner while tasklet_lock was
 * dropped; checked in tasklet_runner() before sleeping.  Presumably set by
 * the _schedule() paths — those lines are elided in this extract.
 */
static int tasklet_pending = 0;
/*
 * Run every scheduled tasklet on `which_list'.  Caller must hold
 * tasklet_lock and must declare the iteration cursors `te' and `tmp_te'.
 * The lock is dropped around each handler invocation so a handler may
 * itself (re)schedule tasklets.
 * NOTE(review): several macro lines (dying-entry cleanup tail, handler
 * call, loop/do-while closers) are elided in this extract.
 */
#define PROCESS_TASKLET_LIST(which_list) do { \
	SLIST_FOREACH_MUTABLE(te, &which_list, tasklet_entries, tmp_te) { \
		struct tasklet_struct *t = te->ts; \
		/* \
		   This tasklet is dying, remove it from the list. \
		   We allow it to run one last time if it has \
		   already been scheduled. \
		*/ \
		if (test_bit(TASKLET_IS_DYING, &t->state)) { \
			kprintf("tasklet: killing %p\n", t); \
			SLIST_REMOVE(&which_list, te, tasklet_entry, tasklet_entries); \
		/* This tasklet is not scheduled, try the next one */ \
		if (!test_bit(TASKLET_STATE_SCHED, &t->state)) \
		clear_bit(TASKLET_STATE_SCHED, &t->state); \
		set_bit(TASKLET_STATE_RUN, &t->state); \
		/* unlock window: handler runs without tasklet_lock held */ \
		lockmgr(&tasklet_lock, LK_RELEASE); \
		lockmgr(&tasklet_lock, LK_EXCLUSIVE); \
		clear_bit(TASKLET_STATE_RUN, &t->state); \
/*
 * Kernel-thread body: sleeps until a tasklet is scheduled, then drains the
 * high-priority queue followed by the normal queue, with tasklet_lock held
 * (dropped around each handler by PROCESS_TASKLET_LIST).
 * NOTE(review): the enclosing service loop and some braces are elided in
 * this extract.
 */
/* XXX runners should be CPU-specific */
tasklet_runner(void *arg)
	struct tasklet_entry *te, *tmp_te;	/* cursors for PROCESS_TASKLET_LIST */
	lockmgr(&tasklet_lock, LK_EXCLUSIVE);
	/*
	 * Only sleep if we haven't been raced by a _schedule()
	 * call during an unlock window
	 */
	if (tasklet_pending == 0) {
		lksleep(&tasklet_runner, &tasklet_lock, 0, "tkidle", 0);
	/* Process hi tasklets first */
	PROCESS_TASKLET_LIST(tlist_hi);
	PROCESS_TASKLET_LIST(tlist);
	lockmgr(&tasklet_lock, LK_RELEASE);
/*
 * Linux API: initialize tasklet `t' with handler `func' and its opaque
 * `data' argument.  (Function body elided in this extract; presumably it
 * clears t->state and stores func/data — confirm against the full source.)
 */
tasklet_init(struct tasklet_struct *t,
	void (*func)(unsigned long), unsigned long data)
/*
 * Linux API: mark `t' TASKLET_STATE_SCHED, queue it on the normal-priority
 * list, and wake the runner thread.  The list is scanned first so a tasklet
 * already queued is not added twice (the match/branch and `te->ts = t'
 * lines are elided in this extract).
 */
tasklet_schedule(struct tasklet_struct *t)
	struct tasklet_entry *te;
	lockmgr(&tasklet_lock, LK_EXCLUSIVE);
	set_bit(TASKLET_STATE_SCHED, &t->state);
	/* only allocate a new entry if `t' is not already on tlist */
	SLIST_FOREACH(te, &tlist, tasklet_entries) {
	te = kzalloc(sizeof(struct tasklet_entry), M_WAITOK);
	SLIST_INSERT_HEAD(&tlist, te, tasklet_entries);
	/* schedule the runner thread on the local cpu core */
	wakeup(&tasklet_runner);
	lockmgr(&tasklet_lock, LK_RELEASE);
/*
 * Linux API: same as tasklet_schedule() but queues `t' on the
 * high-priority list (tlist_hi), which the runner drains first.
 * (Duplicate-entry check body and `te->ts = t' lines are elided in this
 * extract, mirroring tasklet_schedule().)
 */
tasklet_hi_schedule(struct tasklet_struct *t)
	struct tasklet_entry *te;
	lockmgr(&tasklet_lock, LK_EXCLUSIVE);
	set_bit(TASKLET_STATE_SCHED, &t->state);
	/* only allocate a new entry if `t' is not already on tlist_hi */
	SLIST_FOREACH(te, &tlist_hi, tasklet_entries) {
	te = kzalloc(sizeof(struct tasklet_entry), M_WAITOK);
	SLIST_INSERT_HEAD(&tlist_hi, te, tasklet_entries);
	/* schedule the runner thread on the local cpu core */
	wakeup(&tasklet_runner);
	lockmgr(&tasklet_lock, LK_RELEASE);
/*
 * Linux API: flag `t' as dying and wake the runner, which unlinks and
 * frees its queue entry (letting it run one last time if already
 * scheduled — see PROCESS_TASKLET_LIST).
 * NOTE(review): unlike Linux's tasklet_kill(), this does not wait for a
 * running handler to finish before returning — callers that free `t'
 * immediately may race the runner; confirm against the full source.
 */
tasklet_kill(struct tasklet_struct *t)
	set_bit(TASKLET_IS_DYING, &t->state);
	wakeup(&tasklet_runner);
/*
 * SYSINIT hook: spawn the single tasklet runner kernel thread at driver
 * initialization time.  (Return statement elided in this extract; the
 * kthread_create() result is apparently not checked — verify in full
 * source.)
 */
static int init_tasklets(void *arg)
	kthread_create(tasklet_runner, NULL, &tasklet_td, "tasklet_runner");
SYSINIT(linux_tasklet_init, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, init_tasklets, NULL);