/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

#ifdef SMP

int spin_trylock_contested(struct spinlock *spin);
void spin_lock_contested(struct spinlock *spin);
void spin_lock_shared_contested(struct spinlock *spin);
void _spin_pool_lock(void *chan);
void _spin_pool_unlock(void *chan);

#endif

#ifdef SMP

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;
        if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
                return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                __builtin_return_address(0);
                        break;
                }
        }
#endif
        return (TRUE);
}

#else

static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;
        return (TRUE);
}

#endif
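
/*
 * Illustrative sketch, not part of this header: spin_trylock() either
 * acquires the exclusive lock and returns TRUE, or returns FALSE without
 * blocking.  The structure and field names below (struct foo, foo_spin,
 * foo_count) are hypothetical.
 *
 *	static void
 *	foo_try_bump(struct foo *foo)
 *	{
 *		if (spin_trylock(&foo->foo_spin)) {
 *			++foo->foo_count;
 *			spin_unlock(&foo->foo_spin);
 *		}
 *	}
 */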

/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
        return(spin->counta != 0);
}
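
/*
 * Illustrative sketch, not part of this header: spin_held() is mainly
 * useful for assertions, since it cannot tell which cpu or thread owns
 * the lock.  The names below are hypothetical.
 *
 *	static void
 *	foo_remove_locked(struct foo *foo)
 *	{
 *		KKASSERT(spin_held(&foo->foo_spin));
 *		foo->foo_flags &= ~FOO_QUEUED;
 *	}
 */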

/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *spin)
{
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;
#ifdef SMP
        atomic_add_int(&spin->counta, 1);
        if (spin->counta != 1)
                spin_lock_contested(spin);
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                __builtin_return_address(0);
                        break;
                }
        }
#endif
#endif
}

static __inline void
spin_lock(struct spinlock *spin)
{
        spin_lock_quick(mycpu, spin);
}

/*
 * Release an exclusive spinlock.  The release itself never has to spin;
 * we only make sure our per-cpu spinlock count and critical section
 * count remain intact until the lock itself has been cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef SMP
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
                    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 0;
                        gd->gd_curthread->td_spinlock_stack[i] = NULL;
                        gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
                        break;
                }
        }
#endif
        /*
         * Use an unconditional atomic decrement rather than a cmpset loop.
         * To reduce latency we avoid reading spin->counta before writing
         * to it, and other cpus may have bumped counta while contending
         * for the lock, so a plain store of 0 would not be safe either.
         */
#ifdef DEBUG_LOCKS
        KKASSERT(spin->counta != 0);
#endif
        cpu_sfence();
        atomic_add_int(&spin->counta, -1);
        cpu_sfence();
#endif
#ifdef DEBUG_LOCKS
        KKASSERT(gd->gd_spinlocks > 0);
#endif
        --gd->gd_spinlocks;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
        spin_unlock_quick(mycpu, spin);
}
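
/*
 * Illustrative sketch, not part of this header: the usual exclusive
 * lock/unlock pairing.  Holding a spinlock keeps the thread in a critical
 * section, so the protected region must stay short and must not block.
 * struct foo and its fields are hypothetical.
 *
 *	struct foo {
 *		struct spinlock	foo_spin;
 *		int		foo_count;
 *	};
 *
 *	static void
 *	foo_bump(struct foo *foo)
 *	{
 *		spin_lock(&foo->foo_spin);
 *		++foo->foo_count;
 *		spin_unlock(&foo->foo_spin);
 *	}
 */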

/*
 * Shared spinlocks
 */
static __inline void
spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;
#ifdef SMP
        atomic_add_int(&spin->counta, 1);
        if (spin->counta == 1)
                atomic_set_int(&spin->counta, SPINLOCK_SHARED);
        if ((spin->counta & SPINLOCK_SHARED) == 0)
                spin_lock_shared_contested(spin);
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                __builtin_return_address(0);
                        break;
                }
        }
#endif
#endif
}

static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef SMP
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
                    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 0;
                        gd->gd_curthread->td_spinlock_stack[i] = NULL;
                        gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
                        break;
                }
        }
#endif
#ifdef DEBUG_LOCKS
        KKASSERT(spin->counta != 0);
#endif
        cpu_sfence();
        atomic_add_int(&spin->counta, -1);

        /*
         * If we were the last shared holder, counta is now left at
         * SPINLOCK_SHARED with a count of zero; clear the bit so the lock
         * returns to the fully released state.  If another cpu starts a
         * shared or exclusive acquisition meanwhile the cmpset fails,
         * counta no longer equals SPINLOCK_SHARED, and the loop exits;
         * the new acquirer handles the bit as part of its own lock path.
         * This is only a trivial edge case.
         */
        while (spin->counta == SPINLOCK_SHARED) {
                if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED, 0))
                        break;
        }
        cpu_sfence();
#endif
#ifdef DEBUG_LOCKS
        KKASSERT(gd->gd_spinlocks > 0);
#endif
        --gd->gd_spinlocks;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
}

static __inline void
spin_lock_shared(struct spinlock *spin)
{
        spin_lock_shared_quick(mycpu, spin);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
        spin_unlock_shared_quick(mycpu, spin);
}
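
/*
 * Illustrative sketch, not part of this header: readers take the shared
 * form while writers keep using the exclusive form, allowing any number
 * of concurrent readers on the same spinlock.  The names are hypothetical.
 *
 *	static int
 *	foo_peek(struct foo *foo)
 *	{
 *		int count;
 *
 *		spin_lock_shared(&foo->foo_spin);
 *		count = foo->foo_count;
 *		spin_unlock_shared(&foo->foo_spin);
 *
 *		return (count);
 *	}
 */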

static __inline void
spin_pool_lock(void *chan)
{
#ifdef SMP
        _spin_pool_lock(chan);
#else
        spin_lock(NULL);
#endif
}

static __inline void
spin_pool_unlock(void *chan)
{
#ifdef SMP
        _spin_pool_unlock(chan);
#else
        spin_unlock(NULL);
#endif
}
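
/*
 * Illustrative sketch, not part of this header: pool spinlocks map an
 * arbitrary address onto one of a fixed set of kernel-owned spinlocks,
 * which is convenient for objects too small or too numerous to embed
 * their own lock.  The flag name below is hypothetical.
 *
 *	spin_pool_lock(foo);
 *	foo->foo_flags |= FOO_DIRTY;
 *	spin_pool_unlock(foo);
 */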

static __inline void
spin_init(struct spinlock *spin)
{
        spin->counta = 0;
        spin->countb = 0;
}

static __inline void
spin_uninit(struct spinlock *spin)
{
        /* unused */
}
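
/*
 * Illustrative sketch, not part of this header: an embedded spinlock
 * should be cleared with spin_init() before first use, typically when
 * the containing object is constructed.  foo_ctor() is hypothetical.
 *
 *	static void
 *	foo_ctor(struct foo *foo)
 *	{
 *		bzero(foo, sizeof(*foo));
 *		spin_init(&foo->foo_spin);
 *	}
 */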

#endif  /* _KERNEL */
#endif  /* _SYS_SPINLOCK2_H_ */