/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#include <sys/systm.h>

#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif

#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void _spin_lock_contested(struct spinlock *spin, const char *ident);
void _spin_lock_shared_contested(struct spinlock *spin, const char *ident);
void _spin_pool_lock(void *chan, const char *ident);
void _spin_pool_unlock(void *chan);

#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(gd, spin)	_spin_lock_quick(gd, spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(gd, spin) _spin_lock_shared_quick(gd, spin, __func__)
#define spin_pool_lock(chan)		_spin_pool_lock(chan, __func__)

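/*
 * The wrappers above pass __func__ as 'ident' so a contested lock can be
 * attributed to the calling function.  Illustrative expansion only (the
 * caller name is hypothetical):
 *
 *	spin_lock(&pmap_spin);
 *	    expands, inside pmap_enter(), to
 *	_spin_lock(&pmap_spin, "pmap_enter");
 */
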
/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}

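/*
 * Usage sketch (hypothetical 'stat_spin' and 'stat_count'; not part of
 * this header): try for the lock, fall back without spinning when busy.
 *
 *	if (spin_trylock(&stat_spin)) {
 *		++stat_count;
 *		spin_unlock(&stat_spin);
 *	} else {
 *		need_resync = 1;	// take a slow path later
 *	}
 */
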
/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return(spin->counta != 0);
}

/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	atomic_add_int(&spin->counta, 1);
	if (spin->counta != 1)
		_spin_lock_contested(spin, ident);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}

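/*
 * Typical exclusive-lock sequence (hypothetical 'foo_spin', 'foo_list';
 * not part of this header).  Spinlocks are short-hold locks: the holder
 * runs in a critical section and must not block.
 *
 *	spin_lock(&foo_spin);
 *	TAILQ_REMOVE(&foo_list, elm, entry);
 *	spin_unlock(&foo_spin);
 */
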
/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->counta prior to writing to it.
	 */
	KKASSERT(spin->counta != 0);
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
	KKASSERT(gd->gd_spinlocks > 0);
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}

/*
 * Obtain a shared spinlock and return.
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, SPINLOCK_SHARED | 1) == 0)
		_spin_lock_shared_contested(spin, ident);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
}

/*
 * Release a shared spinlock.
 */
static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	KKASSERT(spin->counta != 0);
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

	/*
	 * Make sure SPINLOCK_SHARED is cleared.  If another cpu tries to
	 * get a shared or exclusive lock this loop will break out.  We're
	 * only talking about a very trivial edge case here.
	 */
	while (spin->counta == SPINLOCK_SHARED) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED, 0))
			break;
	}
	KKASSERT(gd->gd_spinlocks > 0);
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}

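/*
 * Shared-lock sketch (hypothetical 'tbl_spin', 'tbl', and lookup; not
 * part of this header): any number of readers may hold the lock at
 * once, while writers use spin_lock().
 *
 *	spin_lock_shared(&tbl_spin);
 *	elm = table_lookup(&tbl, key);
 *	spin_unlock_shared(&tbl_spin);
 */
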
static __inline void
spin_pool_unlock(void *chan)
{
	_spin_pool_unlock(chan);
}

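/*
 * Pool spinlocks map an arbitrary address onto one of a fixed set of
 * spinlocks, so an object needs no embedded struct spinlock.  Sketch
 * (hypothetical 'obj'); the same 'chan' pointer must be passed to the
 * lock and unlock calls:
 *
 *	spin_pool_lock(obj);
 *	obj->refs++;
 *	spin_pool_unlock(obj);
 */
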
static __inline void
spin_init(struct spinlock *spin)
{
	spin->counta = 0;
	spin->countb = 0;
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}

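/*
 * Initialization sketch (hypothetical 'foo_spin'); a spinlock must be
 * initialized (or zeroed) before first use:
 *
 *	struct spinlock foo_spin;
 *
 *	spin_init(&foo_spin);
 */
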
#endif	/* _KERNEL */

#endif	/* _SYS_SPINLOCK2_H_ */