/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif
/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t mtx)
{
	mtx->mtx_lock = 0;
	mtx->mtx_refs = 0;
	mtx->mtx_owner = NULL;
}

static __inline void
mtx_link_init(mtx_link_t link)
{
	link->state = MTX_LINK_IDLE;
}
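
/*
 * Illustrative sketch (not part of the API): typical setup of a mutex
 * embedded in a containing structure.  The "foo" structure and helper
 * are hypothetical.
 */
#if 0
struct foo {
	struct mtx	fo_lock;
	int		fo_count;
};

static void
foo_ctor(struct foo *fo)
{
	mtx_init(&fo->fo_lock);		/* unlocked, no refs */
	fo->fo_count = 0;
}
#endif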
/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t mtx)
{
	/* empty */
}
/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility for the link structure which is required
 * when calling mtx_abort_ex_link().
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t mtx, struct mtx_link *link,
		 const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}
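
/*
 * Illustrative sketch (hypothetical helper names, and assuming the
 * mtx_abort_ex_link() prototype from <sys/mutex.h>): a blocked lock
 * attempt made through mtx_lock_ex_link() can be aborted from another
 * thread while the link structure remains valid; the aborted waiter
 * sees a non-zero error return.
 */
#if 0
static struct mtx_link foo_link;	/* visible to the aborting thread */

static int
foo_lock_interruptibly(mtx_t mtx)
{
	mtx_link_init(&foo_link);
	return (mtx_lock_ex_link(mtx, &foo_link, "fooex", 0, 0));
}

static void
foo_cancel_waiter(mtx_t mtx)
{
	mtx_abort_ex_link(mtx, &foo_link);	/* wakes the waiter */
}
#endif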
/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, "mtxex", 0, 0).
 */
static __inline void
mtx_lock(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, "mtxex", 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}
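
/*
 * Illustrative sketch: the short-form lock pairs with mtx_unlock()
 * (defined later in this file).  The "foo" structure is hypothetical.
 */
#if 0
static void
foo_bump(struct foo *fo)
{
	mtx_lock(&fo->fo_lock);		/* blocks until acquired */
	++fo->fo_count;
	mtx_unlock(&fo->fo_lock);	/* handles any lock type */
}
#endif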
/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}
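
/*
 * Illustrative sketch: an interruptible exclusive lock with a timeout.
 * PCATCH and hz come from the usual kernel environment; per the comment
 * above, a non-zero return is simply the tsleep() error code.
 */
#if 0
static int
foo_modify(struct foo *fo)
{
	int error;

	error = mtx_lock_ex(&fo->fo_lock, "foomod", PCATCH, hz);
	if (error)
		return (error);		/* interrupted or timed out */
	++fo->fo_count;
	mtx_unlock(&fo->fo_lock);
	return (0);
}
#endif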
static __inline int
mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx, ident));
	mtx->mtx_owner = curthread;
	return(0);
}
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, ident, flags, to));
	return(0);
}
static __inline int
mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx, ident));
	return(0);
}
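
/*
 * Illustrative sketch: readers take the shared form so they can run
 * concurrently.  The fast path above is a single cmpset from unlocked
 * to a shared count of 1; contended cases fall into the slow path.
 */
#if 0
static int
foo_read_count(struct foo *fo)
{
	int v;

	mtx_lock_sh_quick(&fo->fo_lock, "foord");
	v = fo->fo_count;		/* other shared holders may be here */
	mtx_unlock_sh(&fo->fo_lock);
	return (v);
}
#endif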
/*
 * Short-form exclusive-lock a mutex, spin until acquired.  Recursion is
 * allowed.  This form is identical to mtx_spinlock_ex().
 */
static __inline void
mtx_spinlock(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock_ex(mtx);
}
/*
 * Exclusive-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_ex(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock_ex(mtx);
}
/*
 * Share-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		_mtx_spinlock_sh(mtx);
}
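
/*
 * Illustrative sketch: the spin forms busy-wait instead of blocking,
 * so they are only appropriate for very short critical sections in
 * contexts where sleeping is not permitted.
 */
#if 0
static void
foo_stat_bump(struct foo *fo)
{
	mtx_spinlock(&fo->fo_lock);	/* spins, never sleeps */
	++fo->fo_count;
	mtx_unlock(&fo->fo_lock);
}
#endif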
/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}
/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}
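
/*
 * Illustrative sketch: try-locks fail with EAGAIN instead of blocking,
 * which is useful when the caller already holds other locks and must
 * avoid a lock-order deadlock.
 */
#if 0
static int
foo_poll(struct foo *fo)
{
	if (mtx_lock_ex_try(&fo->fo_lock) != 0)
		return (EAGAIN);	/* busy, caller retries later */
	++fo->fo_count;
	mtx_unlock(&fo->fo_lock);
	return (0);
}
#endif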
/*
 * Downgrade an exclusively held lock to a shared lock.
 *
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}
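
/*
 * Illustrative sketch: acquire exclusively to initialize, then downgrade
 * to shared so other readers can proceed while this thread keeps reading.
 */
#if 0
static void
foo_setup_then_read(struct foo *fo)
{
	mtx_lock(&fo->fo_lock);		/* exclusive */
	fo->fo_count = 1;
	mtx_downgrade(&fo->fo_lock);	/* now shared, readers may enter */
	/* ... read-only work ... */
	mtx_unlock_sh(&fo->fo_lock);
}
#endif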
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1))
		return(0);
	return (_mtx_upgrade_try(mtx));
}
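
/*
 * Illustrative sketch: attempt an in-place upgrade; on EDEADLK the
 * caller must back out, take the lock exclusively, and revalidate
 * whatever it learned while holding the shared lock.
 */
#if 0
static void
foo_promote(struct foo *fo)
{
	mtx_lock_sh_quick(&fo->fo_lock, "foosh");
	if (mtx_upgrade_try(&fo->fo_lock) != 0) {
		/* EDEADLK: lost the race, reacquire exclusively */
		mtx_unlock_sh(&fo->fo_lock);
		mtx_lock(&fo->fo_lock);
		/* state may have changed, revalidate here */
	}
	++fo->fo_count;
	mtx_unlock(&fo->fo_lock);
}
#endif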
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}
static __inline void
mtx_unlock_ex(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}
static __inline void
mtx_unlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}
/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t mtx)
{
	return(mtx->mtx_lock != 0);
}
/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.
 *
 * Otherwise the mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_islocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}
/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t mtx)
{
	return(mtx->mtx_lock == 0);
}
/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}
/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}
/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}
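
/*
 * Illustrative sketch: the ownership predicates are typically used in
 * debug assertions guarding functions that require the caller to hold
 * the lock.  KKASSERT is the standard DragonFly kernel assertion macro.
 */
#if 0
static void
foo_modify_locked(struct foo *fo)
{
	KKASSERT(mtx_owned(&fo->fo_lock));	/* caller must hold it */
	++fo->fo_count;
}
#endif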
/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline int
mtx_lockrefs(mtx_t mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}
/*
 * Bump the lock's ref count.  This field is independent of the lock.
 */
static __inline void
mtx_hold(mtx_t mtx)
{
	atomic_add_acq_int(&mtx->mtx_refs, 1);
}

/*
 * Drop the lock's ref count.  This field is independent of the lock.
 *
 * Returns the previous ref count, interlocked so testing against
 * 1 means you won the 1->0 transition.
 */
static __inline int
mtx_drop(mtx_t mtx)
{
	return (atomic_fetchadd_int(&mtx->mtx_refs, -1));
}
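
/*
 * Illustrative sketch (hypothetical destructor): because mtx_drop()
 * returns the previous count, exactly one thread observes the 1->0
 * transition and can safely free the containing object.
 */
#if 0
static void
foo_release(struct foo *fo)
{
	if (mtx_drop(&fo->fo_lock) == 1)
		foo_destroy(fo);	/* we won the 1->0 transition */
}
#endif

#endif	/* _SYS_MUTEX2_H_ */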