MPSAFE - mutex - better exclusive lock sequencer, bug fixes, abort
[dragonfly.git] / sys / sys / mutex2.h
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX2_H_
#define	_SYS_MUTEX2_H_

#ifndef	_SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef	_MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif
/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t mtx)
{
	mtx->mtx_lock = 0;
	mtx->mtx_refs = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_link = NULL;
}

static __inline void
mtx_link_init(mtx_link_t link)
{
	link->state = MTX_LINK_IDLE;
}

/*
 * Deinitialize a mutex
 */
static __inline void
mtx_uninit(mtx_t mtx)
{
	/* empty */
}
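
/*
 * Usage sketch (illustrative only, not part of the API): a mutex is
 * normally embedded in a larger structure, set up with mtx_init()
 * before first use and torn down with mtx_uninit().  The structure and
 * function names below are made up for the example; only the mtx_*
 * calls come from this header.
 *
 *	struct mydata {
 *		struct mtx	md_lock;
 *		int		md_value;
 *	};
 *
 *	static void
 *	mydata_ctor(struct mydata *md)
 *	{
 *		md->md_value = 0;
 *		mtx_init(&md->md_lock);
 *	}
 *
 *	static void
 *	mydata_dtor(struct mydata *md)
 *	{
 *		mtx_uninit(&md->md_lock);
 *	}
 */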

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility into the link structure, which is required
 * when calling mtx_abort_ex_link().
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t mtx, struct mtx_link *link,
		 const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}
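
/*
 * Usage sketch (illustrative only): mtx_lock_ex_link() exposes the link
 * structure so that another thread can abort a blocked lock request via
 * mtx_abort_ex_link(), whose (mtx, link) prototype is assumed here from
 * <sys/mutex.h>.  The surrounding names are made up for the example.
 *
 *	struct mtx_link link;
 *	int error;
 *
 *	mtx_link_init(&link);
 *	error = mtx_lock_ex_link(&md->md_lock, &link, "mydlk", 0, 0);
 *	if (error == 0) {
 *		... access the protected data ...
 *		mtx_unlock(&md->md_lock);
 *	}
 *
 * Elsewhere, while the link is still valid, the pending request can be
 * aborted with:
 *
 *	mtx_abort_ex_link(&md->md_lock, &link);
 */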

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}
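
/*
 * Usage sketch (illustrative only): per the comment above, the ident,
 * flags and to arguments correspond to the tsleep() parameters used
 * while blocked, so PCATCH and a tick-based timeout behave as they do
 * for tsleep().  The surrounding names are made up for the example.
 *
 *	int error;
 *
 *	error = mtx_lock_ex(&md->md_lock, "mydlk", PCATCH, hz);
 *	if (error)
 *		return (error);
 *	md->md_value++;
 *	mtx_unlock(&md->md_lock);
 *
 * A non-zero error here is the tsleep() return code, e.g. due to a
 * caught signal or the timeout expiring.
 */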

static __inline int
mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx, ident));
	mtx->mtx_owner = curthread;
	return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, ident, flags, to));
	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx, ident));
	return(0);
}
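
/*
 * Usage sketch (illustrative only): readers take the shared lock, which
 * can be held concurrently by multiple threads, and release it with
 * mtx_unlock() or mtx_unlock_sh().  The surrounding names are made up
 * for the example.
 *
 *	int v;
 *
 *	mtx_lock_sh_quick(&md->md_lock, "mydlk");
 *	v = md->md_value;
 *	mtx_unlock_sh(&md->md_lock);
 */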

/*
 * Exclusive-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_ex(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock_ex(mtx);
}

/*
 * Share-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		_mtx_spinlock_sh(mtx);
}
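
/*
 * Usage sketch (illustrative only): the spin variants busy-wait for the
 * lock instead of blocking, so they are only suitable for very short
 * critical sections.  A lock acquired this way is presumed to be
 * released through the normal mtx_unlock() path; the surrounding names
 * are made up for the example.
 *
 *	mtx_spinlock_ex(&md->md_lock);
 *	md->md_value++;
 *	mtx_unlock(&md->md_lock);
 */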

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}
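
/*
 * Usage sketch (illustrative only): the try variants never block and
 * return EAGAIN when the lock cannot be acquired immediately, which
 * makes them usable on paths that must not sleep.  The surrounding
 * names are made up for the example.
 *
 *	if (mtx_lock_ex_try(&md->md_lock) == 0) {
 *		md->md_value++;
 *		mtx_unlock(&md->md_lock);
 *	} else {
 *		... back off and retry later (EAGAIN was returned) ...
 *	}
 */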

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1))
		return(0);
	return (_mtx_upgrade_try(mtx));
}
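
/*
 * Usage sketch (illustrative only): a thread holding a single shared
 * count can try to upgrade in place; one common recovery on EDEADLK is
 * to drop the shared lock, reacquire exclusively and revalidate any
 * state that was examined under the shared lock.  The surrounding names
 * are made up for the example.
 *
 *	mtx_lock_sh_quick(&md->md_lock, "mydlk");
 *	if (mtx_upgrade_try(&md->md_lock) != 0) {
 *		mtx_unlock_sh(&md->md_lock);
 *		mtx_lock_ex_quick(&md->md_lock, "mydlk");
 *	}
 *	md->md_value++;
 *	mtx_downgrade(&md->md_lock);
 *	... continue reading under the shared lock ...
 *	mtx_unlock(&md->md_lock);
 */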

/*
 * Optimized unlock cases.
 */
static __inline void
mtx_unlock(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.
 *
 * The mutex may otherwise be in an unlocked or shared lock state.
 */
static __inline int
mtx_islocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}
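
/*
 * Usage sketch (illustrative only): the predicates above are typically
 * used in assertions that document and verify locking requirements,
 * e.g. with the DragonFly KKASSERT() macro.  The function name below is
 * made up for the example.
 *
 *	static void
 *	mydata_modify_locked(struct mydata *md)
 *	{
 *		KKASSERT(mtx_owned(&md->md_lock));
 *		md->md_value++;
 *	}
 */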

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline int
mtx_lockrefs(mtx_t mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Bump the lock's ref count.  This field is independent of the lock.
 */
static __inline void
mtx_hold(mtx_t mtx)
{
	atomic_add_acq_int(&mtx->mtx_refs, 1);
}

/*
 * Drop the lock's ref count.  This field is independent of the lock.
 *
 * Returns the previous ref count, interlocked, so testing against
 * 1 means you won the 1->0 transition.
 */
static __inline int
mtx_drop(mtx_t mtx)
{
	return (atomic_fetchadd_int(&mtx->mtx_refs, -1));
}
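
/*
 * Usage sketch (illustrative only): because mtx_drop() returns the
 * previous ref count, the caller that sees 1 performed the 1->0
 * transition and is the one responsible for final cleanup.  The
 * surrounding names are made up for the example.
 *
 *	mtx_hold(&md->md_lock);
 *	... hand md to another consumer ...
 *	if (mtx_drop(&md->md_lock) == 1)
 *		mydata_dtor(md);
 */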

#endif