kernel - revamp mtx_spinlock()
[dragonfly.git] / sys / sys / mutex2.h
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t mtx)
{
        mtx->mtx_lock = 0;
        mtx->mtx_refs = 0;
        mtx->mtx_owner = NULL;
        mtx->mtx_link = NULL;
}

static __inline void
mtx_link_init(mtx_link_t link)
{
        link->state = MTX_LINK_IDLE;
}

/*
 * Deinitialize a mutex
 */
static __inline void
mtx_uninit(mtx_t mtx)
{
        /* empty */
}

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link structure to be
 * passed in, giving the caller access to the link, which is required
 * when calling mtx_abort_ex_link().
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t mtx, struct mtx_link *link,
                 const char *ident, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_link(mtx, link, ident, flags, to));
        mtx->mtx_owner = curthread;
        return(0);
}
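
/*
 * Illustrative sketch (not part of this header): acquiring the lock with
 * a caller-visible link so that another thread holding a pointer to the
 * link can abort the wait via mtx_abort_ex_link().  The mutex "foo_mtx"
 * and the wmesg "foowt" are hypothetical; mtx_t is assumed to be a
 * pointer to struct mtx as declared in <sys/mutex.h>.
 *
 *      struct mtx_link link;
 *      int error;
 *
 *      mtx_link_init(&link);
 *      error = mtx_lock_ex_link(&foo_mtx, &link, "foowt", 0, 0);
 *      if (error == 0) {
 *              ...access state protected by foo_mtx...
 *              mtx_unlock(&foo_mtx);
 *      }
 */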

/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, "mtxex", 0, 0).
 */
static __inline void
mtx_lock(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
                _mtx_lock_ex(mtx, "mtxex", 0, 0);
                return;
        }
        mtx->mtx_owner = curthread;
}
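
/*
 * Illustrative sketch (not part of this header): the short form pairs
 * naturally with mtx_unlock().  The mutex "foo_mtx" is hypothetical and
 * assumed to have been initialized with mtx_init().
 *
 *      mtx_lock(&foo_mtx);
 *      ...modify state protected by foo_mtx...
 *      mtx_unlock(&foo_mtx);
 */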

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex(mtx, ident, flags, to));
        mtx->mtx_owner = curthread;
        return(0);
}
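
/*
 * Illustrative sketch (not part of this header): an interruptible, timed
 * exclusive lock.  "foo_mtx" and the wmesg "foowt" are hypothetical;
 * PCATCH and hz are the usual kernel flag and tick variable, and the
 * return value is the tsleep() error (e.g. EINTR) when the lock was not
 * acquired.
 *
 *      int error;
 *
 *      error = mtx_lock_ex(&foo_mtx, "foowt", PCATCH, hz);
 *      if (error == 0) {
 *              ...access state protected by foo_mtx...
 *              mtx_unlock(&foo_mtx);
 *      }
 */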

static __inline int
mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_quick(mtx, ident));
        mtx->mtx_owner = curthread;
        return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh(mtx, ident, flags, to));
        return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh_quick(mtx, ident));
        return(0);
}
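
/*
 * Illustrative sketch (not part of this header): readers take the shared
 * form while writers take the exclusive form of the same hypothetical
 * "foo_mtx".
 *
 *      mtx_lock_sh_quick(&foo_mtx, "foord");
 *      ...read state protected by foo_mtx...
 *      mtx_unlock_sh(&foo_mtx);
 */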

/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks_wr;

        /*
         * If we cannot get it trivially, get it the hard way.
         *
         * Note that mtx_owner will be set twice if we fail to get it
         * trivially, but conditionalizing the assignment would cost more
         * than the redundant store.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                _mtx_spinlock(mtx);
        mtx->mtx_owner = gd->gd_curthread;
}
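
/*
 * Illustrative sketch (not part of this header): the spinlock form sets
 * up a hard critical section which mtx_spinunlock() tears down, so the
 * two calls must bracket a short, non-blocking region.  "foo_spin" is
 * hypothetical.
 *
 *      mtx_spinlock(&foo_spin);
 *      ...short, non-blocking critical section...
 *      mtx_spinunlock(&foo_spin);
 */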

static __inline int
mtx_spinlock_try(mtx_t mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks_wr;

        /*
         * If we cannot get it trivially, call _mtx_spinlock_try().  That
         * function cleans up the hard critical section if it fails.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_spinlock_try(mtx));
        mtx->mtx_owner = gd->gd_curthread;
        return (0);
}
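
/*
 * Illustrative sketch (not part of this header): the try form returns 0
 * holding the spinlock, or non-zero with the hard critical section
 * already cleaned up, so only the success path needs a matching
 * mtx_spinunlock().  "foo_spin" is hypothetical.
 *
 *      if (mtx_spinlock_try(&foo_spin) == 0) {
 *              ...short, non-blocking critical section...
 *              mtx_spinunlock(&foo_spin);
 *      }
 */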

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return (_mtx_lock_ex_try(mtx));
        mtx->mtx_owner = curthread;
        return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return (_mtx_lock_sh_try(mtx));
        return (0);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t mtx)
{
        mtx->mtx_owner = NULL;
        if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
                _mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = curthread;
                return(0);
        }
        return (_mtx_upgrade_try(mtx));
}
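
/*
 * Illustrative sketch (not part of this header): opportunistically
 * upgrading a shared hold to exclusive, falling back to dropping the
 * shared lock and re-acquiring exclusively when the upgrade fails.
 * This assumes the shared hold is retained on an EDEADLK return, and
 * "foo_mtx" is hypothetical.
 *
 *      mtx_lock_sh_quick(&foo_mtx, "foord");
 *      if (mtx_upgrade_try(&foo_mtx) != 0) {
 *              mtx_unlock_sh(&foo_mtx);
 *              mtx_lock(&foo_mtx);
 *              ...state may have changed, re-validate...
 *      }
 *      ...exclusive access...
 *      mtx_unlock(&foo_mtx);
 */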

/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *       both blocking and spin methods.
 *
 *       The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *       mutexes and produce less code, but it is ok for code to just use
 *       mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *       or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else if (lock == 1) {
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}

static __inline void
mtx_unlock_ex(mtx_t mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}

static __inline void
mtx_unlock_sh(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
                _mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t mtx)
{
        globaldata_t gd = mycpu;

        mtx_unlock(mtx);

        --gd->gd_spinlocks_wr;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t mtx)
{
        return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.
 *
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_islocked_ex(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t mtx)
{
        return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}
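
/*
 * Illustrative sketch (not part of this header): the predicates above are
 * primarily useful for assertions, e.g. verifying that the caller already
 * holds the hypothetical "foo_mtx" exclusively before touching its state:
 *
 *      KKASSERT(mtx_owned(&foo_mtx));
 *      ...modify state protected by foo_mtx...
 */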

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
               mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *       caller the lock count for the other owner is still returned.
 */
static __inline int
mtx_lockrefs(mtx_t mtx)
{
        return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Bump the lock's ref count.  This field is independent of the lock.
 */
static __inline void
mtx_hold(mtx_t mtx)
{
        atomic_add_acq_int(&mtx->mtx_refs, 1);
}

/*
 * Drop the lock's ref count.  This field is independent of the lock.
 *
 * Returns the previous ref count, interlocked, so testing against
 * 1 means you won the 1->0 transition.
 */
static __inline int
mtx_drop(mtx_t mtx)
{
        return (atomic_fetchadd_int(&mtx->mtx_refs, -1));
}
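
/*
 * Illustrative sketch (not part of this header): using the independent
 * ref count to decide when a structure embedding the mutex may be torn
 * down.  The structure "foo" and foo_destroy() are hypothetical; a
 * return of 1 from mtx_drop() means this caller performed the 1->0
 * transition.
 *
 *      mtx_hold(&foo->foo_mtx);
 *      ...
 *      if (mtx_drop(&foo->foo_mtx) == 1)
 *              foo_destroy(foo);
 */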

#endif