drm/i915: Update to Linux 3.16
[dragonfly.git] / sys/dev/drm/include/linux/workqueue.h
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2014 François Tigeot
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUX_WORKQUEUE_H_
#define _LINUX_WORKQUEUE_H_

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>

#include <sys/taskqueue.h>

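/*
 * Minimal mapping of the Linux workqueue API onto DragonFly's taskqueue(9):
 * a workqueue_struct wraps a taskqueue, a work_struct wraps a task, and a
 * delayed_work pairs a work_struct with a callout that defers the enqueue.
 */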
struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct	task		work_task;
	struct	taskqueue	*taskqueue;
	void			(*fn)(struct work_struct *);
};

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
	struct lwkt_token	token;
};

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}

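/*
 * Trampoline: adapts the two-argument taskqueue(9) callback to the
 * single-argument Linux work function stored in work->fn.
 */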
static inline void
_work_fn(void *context, int pending)
{
	struct work_struct *work;

	work = context;
	work->fn(work);
}

#define INIT_WORK(work, func)						\
do {									\
	(work)->fn = (func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
} while (0)

#define INIT_DELAYED_WORK(_work, func)					\
do {									\
	INIT_WORK(&(_work)->work, func);				\
	lwkt_token_init(&(_work)->token, "workqueue token");		\
	callout_init_mp(&(_work)->timer);				\
} while (0)

#define INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK

#define schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread[mycpuid];			\
	taskqueue_enqueue(taskqueue_thread[mycpuid], &(work)->work_task); \
} while (0)

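/*
 * Illustrative sketch of typical usage (not part of this header); the softc
 * and function names are hypothetical.  The work function runs later in a
 * taskqueue thread and may sleep:
 *
 *	struct mydrv_softc {
 *		struct work_struct	hotplug_work;
 *	};
 *
 *	static void
 *	mydrv_hotplug_func(struct work_struct *work)
 *	{
 *		struct mydrv_softc *sc =
 *		    container_of(work, struct mydrv_softc, hotplug_work);
 *
 *		mydrv_handle_hotplug(sc);
 *	}
 *
 * At attach time the driver does
 *	INIT_WORK(&sc->hotplug_work, mydrv_hotplug_func);
 * and from an interrupt or other hot path
 *	schedule_work(&sc->hotplug_work);
 */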
#define flush_scheduled_work()	flush_taskqueue(taskqueue_thread[mycpuid])

static inline int
queue_work(struct workqueue_struct *q, struct work_struct *work)
{
	work->taskqueue = q->taskqueue;
	/* Invert the return value to follow the Linux convention. */
	return !taskqueue_enqueue(q->taskqueue, &work->work_task);
}

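/*
 * Delayed work: queue_delayed_work() arms the callout; when it fires,
 * _delayed_work_fn() enqueues the work on the taskqueue recorded at
 * scheduling time.  A delay of zero bypasses the callout and enqueues
 * immediately.
 */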
static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *work;

	work = arg;
	taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0) {
		lwkt_gettoken(&work->token);
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
		lwkt_reltoken(&work->token);
	} else {
		_delayed_work_fn((void *)work);
	}

	return (!pending);
}

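/*
 * The temporary workqueue_struct on the stack only carries the per-CPU
 * system taskqueue down to queue_delayed_work(), which copies the taskqueue
 * pointer before returning.
 */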
static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct workqueue_struct wq;

	wq.taskqueue = taskqueue_thread[mycpuid];
	return queue_delayed_work(&wq, dwork, delay);
}

static inline struct workqueue_struct *
_create_workqueue_common(char *name, int cpus)
{
	struct workqueue_struct *wq;

	wq = kmalloc(sizeof(*wq), M_DRM, M_WAITOK);
	wq->taskqueue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &wq->taskqueue);
	taskqueue_start_threads(&wq->taskqueue, cpus, 0, -1, "%s", name);

	return (wq);
}

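/*
 * All workqueue creation variants funnel into _create_workqueue_common();
 * "singlethread" and "ordered" queues simply get one servicing thread, and
 * the Linux WQ_* flags passed to alloc_ordered_workqueue() are ignored.
 */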
#define create_singlethread_workqueue(name)				\
	_create_workqueue_common(name, 1)

#define create_workqueue(name)						\
	_create_workqueue_common(name, MAXCPU)

#define alloc_ordered_workqueue(name, flags)				\
	_create_workqueue_common(name, 1)

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskqueue_free(wq->taskqueue);
	kfree(wq);
}

#define flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

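/*
 * Flushing enqueues a no-op sentinel task and drains it, so the call returns
 * only after the sentinel has run; tasks queued before the flush are
 * processed ahead of it in queue order.
 */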
static inline void
_flush_fn(void *context, int pending)
{
}

static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}

static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return 0;
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	lwkt_gettoken(&work->token);
	callout_stop(&work->timer);
	lwkt_reltoken(&work->token);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

	lwkt_gettoken(&work->token);
	callout_drain(&work->timer);
	lwkt_reltoken(&work->token);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return 0;
}

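/*
 * Implemented as cancel + requeue.  Unlike the Linux version, which reports
 * whether the delayed work was pending, this always returns false.
 */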
static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	return false;
}

#endif	/* _LINUX_WORKQUEUE_H_ */