Merge branch 'vendor/DHCPCD'
[dragonfly.git] / sys / dev / drm / linux_tasklet.c
1 /*
2  * Copyright (c) 2020 François Tigeot <ftigeot@wolfpond.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 #include <linux/interrupt.h>
28 #include <linux/slab.h>
29
30 #include <sys/kthread.h>
31
32 /*
33  * Linux tasklet constraints:
34  * - tasklets that have the same type cannot be run on multiple processors at
35  *   the same time
36  * - tasklets always run on the processor from which they were originally
37  *   submitted
38  * - when a tasklet is scheduled, its state is set to TASKLET_STATE_SCHED,
39  *   and the tasklet added to a queue
40  * - during the execution of its function, the tasklet state is set to
41  *   TASKLET_STATE_RUN and the TASKLET_STATE_SCHED state is removed
42  */
43
/*
 * Links one tasklet_struct into tlist or tlist_hi.  Allocated by the
 * _schedule() functions on first scheduling; freed by the runner thread
 * once the tasklet is marked TASKLET_IS_DYING.
 */
struct tasklet_entry {
	struct tasklet_struct *ts;
	SLIST_ENTRY(tasklet_entry) tasklet_entries;
};
48
/* Serializes both tasklet lists and the tasklet_pending flag. */
static struct lock tasklet_lock = LOCK_INITIALIZER("dltll", 0, LK_CANRECURSE);

/* The single runner thread (see XXX below: not yet per-CPU). */
static struct thread *tasklet_td = NULL;
/*
 * Normal- and high-priority queues drained by tasklet_runner().
 * NOTE(review): these are not static — presumably nothing outside this
 * file references them; confirm and consider making them file-local.
 */
SLIST_HEAD(tasklet_list_head, tasklet_entry) tlist = SLIST_HEAD_INITIALIZER(tlist);
SLIST_HEAD(tasklet_hi_list_head, tasklet_entry) tlist_hi = SLIST_HEAD_INITIALIZER(tlist_hi);

/* Set under tasklet_lock by the _schedule() functions; cleared by the runner. */
static int tasklet_pending = 0;
56
/*
 * Walk one tasklet list and run every tasklet currently in the
 * TASKLET_STATE_SCHED state.  Caller must hold tasklet_lock exclusively;
 * the lock is dropped around each t->func() callback so the callback may
 * itself call tasklet_schedule() without deadlocking.  Entries inserted
 * during that unlock window are caught on a later pass via tasklet_pending.
 *
 * Expects 'te' and 'tmp_te' (struct tasklet_entry *) in the caller's scope.
 */
#define PROCESS_TASKLET_LIST(which_list) do { \
        SLIST_FOREACH_MUTABLE(te, &which_list, tasklet_entries, tmp_te) { \
                struct tasklet_struct *t = te->ts;                      \
                                                                        \
                /*                                                      \
                   This tasklet is dying, remove it from the list.      \
                   We allow to it to run one last time if it has        \
                   already been scheduled.                              \
                */                                                      \
                if (test_bit(TASKLET_IS_DYING, &t->state)) {            \
                        kprintf("tasklet: killing %p\n", t);            \
                        SLIST_REMOVE(&which_list, te, tasklet_entry, tasklet_entries); \
                        /* 'te' is dead past this point; only 't' is used below */ \
                        kfree(te);                                      \
                }                                                       \
                                                                        \
                /* This tasklet is not scheduled, try the next one */   \
                if (!test_bit(TASKLET_STATE_SCHED, &t->state))          \
                        continue;                                       \
                                                                        \
                clear_bit(TASKLET_STATE_SCHED, &t->state);              \
                set_bit(TASKLET_STATE_RUN, &t->state);                  \
                                                                        \
                /* Drop the lock across the callback (see header comment) */ \
                lockmgr(&tasklet_lock, LK_RELEASE);                     \
                if (t->func)                                            \
                        t->func(t->data);                               \
                lockmgr(&tasklet_lock, LK_EXCLUSIVE);                   \
                                                                        \
                clear_bit(TASKLET_STATE_RUN, &t->state);                \
        }                                                               \
} while (0)
87
/* XXX runners should be CPU-specific */
/*
 * Runner thread body: sleep until a _schedule() call posts work, then
 * drain the high-priority list followed by the normal list.  Runs with
 * tasklet_lock held except inside the callbacks (PROCESS_TASKLET_LIST
 * drops the lock around each t->func() invocation).
 */
static void
tasklet_runner(void *arg)
{
	struct tasklet_entry *te, *tmp_te;

	lockmgr(&tasklet_lock, LK_EXCLUSIVE);
	while (1) {
		/*
		   Only sleep if we haven't been raced by a _schedule()
		   call during an unlock window
		*/
		if (tasklet_pending == 0) {
			lksleep(&tasklet_runner, &tasklet_lock, 0, "tkidle", 0);
		}
		/* Consume the wakeup before draining, so a new _schedule()
		   during a drain pass forces another pass instead of a sleep. */
		tasklet_pending = 0;

		/* Process hi tasklets first */
		PROCESS_TASKLET_LIST(tlist_hi);
		PROCESS_TASKLET_LIST(tlist);
	}
	/* Not reached: the loop above never exits.  Kept for form. */
	lockmgr(&tasklet_lock, LK_RELEASE);
}
111
112 void
113 tasklet_init(struct tasklet_struct *t,
114              void (*func)(unsigned long), unsigned long data)
115 {
116         t->state = 0;
117         t->func = func;
118         t->data = data;
119 }
120
121 void
122 tasklet_schedule(struct tasklet_struct *t)
123 {
124         struct tasklet_entry *te;
125
126         lockmgr(&tasklet_lock, LK_EXCLUSIVE);
127         set_bit(TASKLET_STATE_SCHED, &t->state);
128
129         SLIST_FOREACH(te, &tlist, tasklet_entries) {
130                 if (te->ts == t)
131                         goto found_and_done;
132         }
133
134         te = kzalloc(sizeof(struct tasklet_entry), M_WAITOK);
135         te->ts = t;
136         SLIST_INSERT_HEAD(&tlist, te, tasklet_entries);
137
138 found_and_done:
139         /* schedule the runner thread on the local cpu core */
140         tasklet_pending = 1;
141         wakeup(&tasklet_runner);
142         lockmgr(&tasklet_lock, LK_RELEASE);
143 }
144
145 void
146 tasklet_hi_schedule(struct tasklet_struct *t)
147 {
148         struct tasklet_entry *te;
149
150         lockmgr(&tasklet_lock, LK_EXCLUSIVE);
151         set_bit(TASKLET_STATE_SCHED, &t->state);
152
153         SLIST_FOREACH(te, &tlist_hi, tasklet_entries) {
154                 if (te->ts == t)
155                         goto found_and_done;
156         }
157
158         te = kzalloc(sizeof(struct tasklet_entry), M_WAITOK);
159         te->ts = t;
160         SLIST_INSERT_HEAD(&tlist_hi, te, tasklet_entries);
161
162 found_and_done:
163         /* schedule the runner thread on the local cpu core */
164         tasklet_pending = 1;
165         wakeup(&tasklet_runner);
166         lockmgr(&tasklet_lock, LK_RELEASE);
167 }
168
/*
 * Mark a tasklet as dying; the runner reaps its list entry on the next
 * pass, letting it run one last time if it was already scheduled.
 *
 * NOTE(review): unlike Linux tasklet_kill(), this does not wait for a
 * currently-running instance to finish before returning — confirm no
 * caller frees tasklet-owned data immediately after calling this.
 */
void
tasklet_kill(struct tasklet_struct *t)
{
	set_bit(TASKLET_IS_DYING, &t->state);
	/* Nudge the runner so the entry is reaped promptly. */
	wakeup(&tasklet_runner);
}
175
176 static int init_tasklets(void *arg)
177 {
178         kthread_create(tasklet_runner, NULL, &tasklet_td, "tasklet_runner");
179
180         return 0;
181 }
182
183 SYSINIT(linux_tasklet_init, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, init_tasklets, NULL);