/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * and blocking support for the contended paths here.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/mutex.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
            &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
            &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
            &mtx_wakeup_count, 0, "");
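
/*
 * Illustrative sketch (excluded from the build): how a caller might use
 * one of these mutexes to hold a lock across a blocking condition, the
 * case LWKT tokens cannot handle.  The mtx_init()/mtx_lock_ex()/
 * mtx_unlock() wrappers are assumed to come from sys/mutex2.h; the
 * example names and the "exmpl" wmesg are hypothetical.
 */
#if 0
static struct mtx example_mtx;

static void
example_user(void)
{
        mtx_init(&example_mtx);
        mtx_lock_ex(&example_mtx, "exmpl", 0);  /* may tsleep() */
        /* ... access shared structures, possibly blocking ... */
        mtx_unlock(&example_mtx);
}
#endif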

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 */
void
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags)
{
        u_int   lock;
        u_int   nlock;

        for (;;) {
                lock = mtx->mtx_lock;
                if (lock == 0) {
                        nlock = MTX_EXCLUSIVE | 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                                /* mtx_owner set by caller */
                                return;
                        }
                } else if ((lock & MTX_EXCLUSIVE) &&
                           mtx->mtx_owner == curthread) {
                        /* recursive exclusive hold, bump the count */
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                return;
                } else {
                        /*
                         * Contested.  Publish EXWANTED and sleep on
                         * &mtx->mtx_owner, using the interlock so a
                         * release occurring between the cmpset and the
                         * tsleep() cannot lose the wakeup.
                         */
                        nlock = lock | MTX_EXWANTED;
                        tsleep_interlock(&mtx->mtx_owner, 0);
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                ++mtx_contention_count;
                                tsleep(&mtx->mtx_owner, flags | PINTERLOCKED,
                                       ident, 0);
                        } else {
                                tsleep_remove(curthread);
                        }
                }
                ++mtx_collision_count;
        }
}
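
/*
 * Note on the blocking path above: tsleep_interlock() must be issued
 * before the cmpset which publishes the WANTED bit.  If the holder
 * releases the mutex in the window between that cmpset and the
 * tsleep(), the interlock records the wakeup and the PINTERLOCKED
 * tsleep() returns immediately instead of sleeping forever.
 */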

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 */
void
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags)
{
        u_int   lock;
        u_int   nlock;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                return;
                } else {
                        /*
                         * Held exclusively by someone else.  Shared
                         * waiters sleep on the mutex itself, exclusive
                         * waiters on &mtx->mtx_owner.
                         */
                        nlock = lock | MTX_SHWANTED;
                        tsleep_interlock(mtx, 0);
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                ++mtx_contention_count;
                                tsleep(mtx, flags | PINTERLOCKED, ident, 0);
                        } else {
                                tsleep_remove(curthread);
                        }
                }
                ++mtx_collision_count;
        }
}

/*
 * Exclusive-lock a mutex, spin until acquired.  Recursion is allowed.
 */
void
_mtx_spinlock_ex(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     bb = 1;
        int     bo;

        for (;;) {
                lock = mtx->mtx_lock;
                if (lock == 0) {
                        nlock = MTX_EXCLUSIVE | 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                                /* mtx_owner set by caller */
                                return;
                        }
                } else if ((lock & MTX_EXCLUSIVE) &&
                           mtx->mtx_owner == curthread) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                return;
                } else {
                        /* grow the backoff window to reduce bus traffic */
                        if (bb < 1000)
                                ++bb;
                        for (bo = 0; bo < bb; ++bo)
                                cpu_pause();
                        ++mtx_contention_count;
                }
                ++mtx_collision_count;
        }
}

/*
 * Share-lock a mutex, spin until acquired.  Recursion is allowed.
 */
void
_mtx_spinlock_sh(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     bb = 1;
        int     bo;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                return;
                } else {
                        /* grow the backoff window to reduce bus traffic */
                        if (bb < 1000)
                                ++bb;
                        for (bo = 0; bo < bb; ++bo)
                                cpu_pause();
                        ++mtx_contention_count;
                }
                ++mtx_collision_count;
        }
}

/*
 * Attempt to exclusive-lock a mutex, non-blocking.  Recursion is
 * allowed.  Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_lock_ex_try(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     error = 0;

        for (;;) {
                lock = mtx->mtx_lock;
                if (lock == 0) {
                        nlock = MTX_EXCLUSIVE | 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                                /* mtx_owner set by caller */
                                break;
                        }
                } else if ((lock & MTX_EXCLUSIVE) &&
                           mtx->mtx_owner == curthread) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                } else {
                        error = EAGAIN;
                        break;
                }
                ++mtx_collision_count;
        }
        return (error);
}

/*
 * Attempt to share-lock a mutex, non-blocking.  Recursion is allowed.
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_lock_sh_try(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     error = 0;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                } else {
                        error = EAGAIN;
                        break;
                }
                ++mtx_collision_count;
        }
        return (error);
}

/*
 * Downgrade an exclusively held lock to a shared lock.
 *
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        /* already shared, nothing to do */
                        KKASSERT((lock & MTX_MASK) > 0);
                        break;
                }
                KKASSERT(mtx->mtx_owner == curthread);
                nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        if (lock & MTX_SHWANTED) {
                                ++mtx_wakeup_count;
                                wakeup(mtx);
                        }
                        break;
                }
                ++mtx_collision_count;
        }
}
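
/*
 * Illustrative sketch (excluded from the build): a typical downgrade
 * pattern, modifying a structure exclusively and then continuing to
 * read it shared while other readers are allowed back in.  Names are
 * hypothetical, reusing the example declarations above.
 */
#if 0
        mtx_lock_ex(&example_mtx, "exmpl", 0);
        /* ... modify the protected structure ... */
        _mtx_downgrade(&example_mtx);   /* ex count becomes a sh count */
        /* ... keep reading; other readers may now enter ... */
        mtx_unlock(&example_mtx);
#endif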

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     error = 0;

        for (;;) {
                lock = mtx->mtx_lock;

                if ((lock & ~MTX_EXWANTED) == 1) {
                        /* single shared count, try to flip in EXCLUSIVE */
                        nlock = lock | MTX_EXCLUSIVE;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                mtx->mtx_owner = curthread;
                                break;
                        }
                } else if (lock & MTX_EXCLUSIVE) {
                        KKASSERT(mtx->mtx_owner == curthread);
                        break;
                } else {
                        error = EDEADLK;
                        break;
                }
                ++mtx_collision_count;
        }
        return (error);
}
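
/*
 * Illustrative sketch (excluded from the build): an EDEADLK return
 * means other shared holders are present, so an upgrade attempt needs
 * a fallback that fully releases and reacquires exclusively.  Names
 * are hypothetical, reusing the example declarations above.
 */
#if 0
        mtx_lock_sh(&example_mtx, "exmpl", 0);
        if (_mtx_upgrade_try(&example_mtx) == EDEADLK) {
                mtx_unlock(&example_mtx);
                mtx_lock_ex(&example_mtx, "exmpl", 0);
                /* NOTE: state may have changed while unlocked, revalidate */
        }
        /* ... exclusive access ... */
        mtx_unlock(&example_mtx);
#endif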

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * On the final release all shared waiters are woken up but only one
 * exclusive waiter is: shared waiters can all succeed together, exclusive
 * waiters cannot.
 */
void
_mtx_unlock(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;

        for (;;) {
                lock = mtx->mtx_lock;
                nlock = (lock & (MTX_EXCLUSIVE | MTX_MASK)) - 1;
                if (nlock == 0) {
                        /* last shared count is being released */
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0)) {
                                if (lock & MTX_SHWANTED) {
                                        ++mtx_wakeup_count;
                                        wakeup(mtx);
                                }
                                if (lock & MTX_EXWANTED) {
                                        ++mtx_wakeup_count;
                                        wakeup_one(&mtx->mtx_owner);
                                }
                                break;
                        }
                } else if (nlock == MTX_EXCLUSIVE) {
                        /* last exclusive count is being released */
                        mtx->mtx_owner = NULL;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0)) {
                                if (lock & MTX_SHWANTED) {
                                        ++mtx_wakeup_count;
                                        wakeup(mtx);
                                }
                                if (lock & MTX_EXWANTED) {
                                        ++mtx_wakeup_count;
                                        wakeup_one(&mtx->mtx_owner);
                                }
                                break;
                        }
                } else {
                        /* drop one recursion or shared count */
                        KKASSERT((nlock & MTX_MASK) != MTX_MASK);
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                }
                ++mtx_collision_count;
        }
}