/*
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2014-2019 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUX_WORKQUEUE_H_
#define _LINUX_WORKQUEUE_H_

#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <sys/taskqueue.h>

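/*
 * This header maps the Linux workqueue API onto DragonFly's taskqueue(9):
 * a work_struct wraps a task, a delayed_work pairs a work_struct with a
 * callout that enqueues the task once its delay expires, and a
 * workqueue_struct is a thin wrapper around a taskqueue.
 */
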
struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct	task		work_task;
	struct	taskqueue	*taskqueue;
	void			(*func)(struct work_struct *);
};

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
};

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}
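
/*
 * Usual to_delayed_work() pattern: a handler only receives the embedded
 * work_struct, so the enclosing delayed_work is recovered via
 * container_of(). A minimal sketch with hypothetical names, not part of
 * this header:
 */
#if 0
static void
my_timeout_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* dwork is the delayed_work whose timer fired */
}
#endif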

/* taskqueue(9) handler: trampoline into the Linux-style work function */
static inline void
_work_fn(void *context, int pending)
{
	struct work_struct *work;

	work = context;
	work->func(work);
}

#define INIT_WORK(work, _func)						\
do {									\
	(work)->func = (_func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
} while (0)

#define INIT_WORK_ONSTACK(work, _func)	INIT_WORK(work, _func)

#define INIT_DELAYED_WORK(_work, _func)					\
do {									\
	INIT_WORK(&(_work)->work, _func);				\
	callout_init_mp(&(_work)->timer);				\
} while (0)

#define INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK

#define schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread[mycpuid];			\
	taskqueue_enqueue(taskqueue_thread[mycpuid], &(work)->work_task); \
} while (0)

#define flush_scheduled_work()	flush_taskqueue(taskqueue_thread[mycpuid])
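
/*
 * Minimal usage sketch for the per-CPU system queue; my_softc, my_irq_fn
 * and my_attach are hypothetical driver names, not part of this header.
 */
#if 0
struct my_softc {
	struct work_struct	irq_work;
};

static void
my_irq_fn(struct work_struct *work)
{
	struct my_softc *sc = container_of(work, struct my_softc, irq_work);

	/* runs later, in the taskqueue thread of the scheduling CPU */
}

static void
my_attach(struct my_softc *sc)
{
	INIT_WORK(&sc->irq_work, my_irq_fn);
	schedule_work(&sc->irq_work);
}
#endif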

static inline int
queue_work(struct workqueue_struct *q, struct work_struct *work)
{
	work->taskqueue = q->taskqueue;
	/* taskqueue_enqueue() returns 0 on success; invert to match Linux */
	return !taskqueue_enqueue(q->taskqueue, &work->work_task);
}

/* callout(9) handler: the delay has expired, hand the work to its taskqueue */
static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *work;

	work = arg;
	taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
}

/*
 * If delay is non-zero (hz ticks, passed straight to callout_reset()), arm
 * the callout; otherwise enqueue immediately. Returns true when the work
 * was not already pending, approximating the Linux return value.
 */
static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0) {
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
	} else {
		_delayed_work_fn(work);
	}

	return (!pending);
}
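
/*
 * Delayed-work sketch on a private single-threaded queue; drm_wq,
 * retry_work, my_retry_fn and the half-second delay are illustrative only.
 */
#if 0
static struct workqueue_struct *drm_wq;
static struct delayed_work retry_work;

static void
my_retry_fn(struct work_struct *work)
{
}

static void
my_setup(void)
{
	drm_wq = create_singlethread_workqueue("drmwq");
	INIT_DELAYED_WORK(&retry_work, my_retry_fn);
	queue_delayed_work(drm_wq, &retry_work, hz / 2);
}
#endif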

static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct workqueue_struct wq;

	/*
	 * The on-stack wrapper is safe here: queue_delayed_work() copies
	 * its taskqueue pointer into the work item before returning.
	 */
	wq.taskqueue = taskqueue_thread[mycpuid];
	return queue_delayed_work(&wq, dwork, delay);
}

struct workqueue_struct *_create_workqueue_common(char *name, int cpus);

#define create_singlethread_workqueue(name)				\
	_create_workqueue_common(name, 1)

#define create_workqueue(name)						\
	_create_workqueue_common(name, MAXCPU)

#define alloc_ordered_workqueue(name, flags)				\
	_create_workqueue_common(name, 1)

#define alloc_workqueue(name, flags, max_active)			\
	_create_workqueue_common(name, max_active)

void destroy_workqueue(struct workqueue_struct *wq);

#define flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

/* No-op task used as a flush marker by flush_taskqueue() below */
static inline void
_flush_fn(void *context, int pending)
{
}

/*
 * Flushing is emulated by enqueueing a no-op marker task and draining it:
 * taskqueue_drain() returns once the marker is neither queued nor running.
 */
static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}

/*
 * taskqueue_cancel() reports failure when the task is already running;
 * wait for it in that case. Always returns 0, whereas Linux reports
 * whether the work was pending.
 */
static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return 0;
}

/*
 * As on Linux, this may return while the work is still running on
 * another CPU.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	callout_stop(&work->timer);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{
	/* Synchronous variant: also waits out a handler that is already running */
	callout_cancel(&work->timer);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return 0;
}
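
/*
 * Teardown sketch: cancel synchronously before destroying the queue so
 * neither the callout nor the task can fire afterwards. drm_wq and
 * retry_work are the hypothetical names from the sketch above.
 */
#if 0
extern struct workqueue_struct *drm_wq;
extern struct delayed_work retry_work;

static void
my_teardown(void)
{
	cancel_delayed_work_sync(&retry_work);
	flush_workqueue(drm_wq);
	destroy_workqueue(drm_wq);
}
#endif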

static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	/* Linux reports whether a pending work was modified; this shim does not */
	return false;
}

static inline bool
flush_work(struct work_struct *work)
{
	if (work->taskqueue != NULL)
		taskqueue_drain(work->taskqueue, &work->work_task);
	return true;
}

/* No-op: nothing to release for on-stack work in this implementation */
static inline void
destroy_work_on_stack(struct work_struct *work)
{
}

/* System-wide workqueues */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_unbound_wq;

static inline unsigned int
work_busy(struct work_struct *work)
{
	/* Just pretend nothing is busy; this function is unreliable anyway */
	return 0;
}

#endif	/* _LINUX_WORKQUEUE_H_ */