/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t mtx)
{
	mtx->mtx_lock = 0;
	mtx->mtx_refs = 0;
	mtx->mtx_owner = NULL;
}

/*
 * Deinitialize a mutex
 */
static __inline void
mtx_uninit(mtx_t mtx)
{
	/* empty */
}

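/*
 * Illustrative usage sketch (not part of this header): a typical
 * init/uninit lifecycle for a mutex embedded in a driver softc.
 * The "mysoftc" structure and its field names are hypothetical.
 *
 *	struct mysoftc {
 *		struct mtx	sc_lock;
 *	};
 *
 *	mtx_init(&sc->sc_lock);
 *	...
 *	mtx_uninit(&sc->sc_lock);
 */
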
/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 */
static __inline void
mtx_lock_ex(mtx_t mtx, const char *ident, int flags)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_lock_ex(mtx, ident, flags);
	mtx->mtx_owner = curthread;
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 */
static __inline void
mtx_lock_sh(mtx_t mtx, const char *ident, int flags)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		_mtx_lock_sh(mtx, ident, flags);
}

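/*
 * Illustrative usage sketch (not part of this header): a blocking
 * exclusive acquire/release pair.  "ident" and "flags" are passed
 * through to the blocking path; the values below are hypothetical
 * caller-side choices.
 *
 *	mtx_lock_ex(&sc->sc_lock, "myident", 0);
 *	... exclusive critical section ...
 *	mtx_unlock_ex(&sc->sc_lock);
 */
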
/*
 * Exclusive-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_ex(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock_ex(mtx);
}

/*
 * Share-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		_mtx_spinlock_sh(mtx);
}

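/*
 * Illustrative note (not part of this header): the spin variants are
 * suited to very short critical sections in contexts that must not
 * block.  A sketch, with hypothetical names:
 *
 *	mtx_spinlock_ex(&sc->sc_lock);
 *	sc->sc_count++;		(short, non-blocking work only)
 *	mtx_unlock_ex(&sc->sc_lock);
 */
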
/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

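/*
 * Illustrative usage sketch (not part of this header): the try
 * variants return 0 on success, so a caller typically falls back
 * or defers on failure.  Hypothetical names:
 *
 *	if (mtx_lock_ex_try(&sc->sc_lock) == 0) {
 *		... exclusive critical section ...
 *		mtx_unlock_ex(&sc->sc_lock);
 *	} else {
 *		... lock unavailable, requeue or retry later ...
 *	}
 */
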
/*
 * Downgrade an exclusively held lock to a shared lock.
 *
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}

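/*
 * Illustrative usage sketch (not part of this header): acquire
 * exclusively to mutate, then downgrade so other readers may enter
 * while the caller retains read access.  Hypothetical names:
 *
 *	mtx_lock_ex(&sc->sc_lock, "myident", 0);
 *	sc->sc_state = NEWSTATE;
 *	mtx_downgrade(&sc->sc_lock);
 *	... shared critical section ...
 *	mtx_unlock_sh(&sc->sc_lock);
 */
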
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = curthread;
		return (0);
	}
	return (_mtx_upgrade_try(mtx));
}

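/*
 * Illustrative usage sketch (not part of this header), assuming the
 * shared hold is retained when the upgrade fails with EDEADLK.
 * Hypothetical names:
 *
 *	mtx_lock_sh(&sc->sc_lock, "myident", 0);
 *	if (mtx_upgrade_try(&sc->sc_lock) == 0) {
 *		... now exclusive, safe to modify ...
 *		mtx_unlock_ex(&sc->sc_lock);
 *	} else {
 *		... still shared; back off and retry exclusively ...
 *		mtx_unlock_sh(&sc->sc_lock);
 *	}
 */
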
/*
 * Optimized unlock cases.
 */
static __inline void
mtx_unlock(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * Bump the lock's ref count.  This field is independent of the lock.
 */
static __inline void
mtx_hold(mtx_t mtx)
{
	atomic_add_acq_int(&mtx->mtx_refs, 1);
}

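/*
 * Illustrative usage sketch (not part of this header): mtx_hold() and
 * mtx_drop() (below) maintain a reference count independent of the lock
 * state, e.g. to decide when a lock-containing structure may be freed.
 * Hypothetical names:
 *
 *	mtx_hold(&sc->sc_lock);
 *	...
 *	if (mtx_drop(&sc->sc_lock) == 1) {
 *		... we won the 1->0 transition; last reference,
 *		    safe to tear down ...
 *	}
 */
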
/*
 * Drop the lock's ref count.  This field is independent of the lock.
 *
 * Returns the previous ref count, interlocked, so testing against
 * 1 means you won the 1->0 transition.
 */
static __inline int
mtx_drop(mtx_t mtx)
{
	return (atomic_fetchadd_int(&mtx->mtx_refs, -1));
}

#endif	/* _SYS_MUTEX2_H_ */