/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define	_SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int	spin_trylock_contested(struct spinlock *spin);
void	spin_lock_contested(struct spinlock *spin);
void	spin_lock_shared_contested(struct spinlock *spin);
void	_spin_pool_lock(void *chan);
void	_spin_pool_unlock(void *chan);

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}
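
/*
 * Example (illustrative sketch only, not part of this header): the
 * usual try-lock pattern.  'mtx' is a hypothetical spinlock chosen
 * for the example.
 *
 *	if (spin_trylock(&mtx)) {
 *		...short critical section...
 *		spin_unlock(&mtx);
 *	} else {
 *		...fallback path, no spinning...
 *	}
 */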

/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return (spin->counta != 0);
}
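
/*
 * Example (illustrative sketch only): since spin_held() cannot
 * identify the owner, it is mainly useful in sanity assertions,
 * e.g. with a hypothetical lock 'mtx':
 *
 *	KKASSERT(spin_held(&mtx));
 */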

/*
 * Obtain an exclusive spinlock and return.  (A short usage sketch
 * follows spin_unlock() below.)
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *spin)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	atomic_add_int(&spin->counta, 1);
	if (spin->counta != 1)
		spin_lock_contested(spin);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
spin_lock(struct spinlock *spin)
{
	spin_lock_quick(mycpu, spin);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Drop the count with a blind atomic decrement.  To reduce
	 * latency we avoid reading spin->counta prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}
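
/*
 * Example (illustrative sketch only): the usual exclusive pattern.
 * Holding a spinlock implies a critical section, so only short,
 * non-blocking code may run under it.  'mtx' and 'count' are
 * hypothetical names.
 *
 *	spin_lock(&mtx);
 *	++count;			(short work only, may not block)
 *	spin_unlock(&mtx);
 */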

/*
 * Obtain a shared spinlock and return.
 */
static __inline void
spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	atomic_add_int(&spin->counta, 1);
	if (spin->counta == 1)
		atomic_set_int(&spin->counta, SPINLOCK_SHARED);
	if ((spin->counta & SPINLOCK_SHARED) == 0)
		spin_lock_shared_contested(spin);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

	/*
	 * Make sure SPINLOCK_SHARED is cleared.  If another cpu tries to
	 * get a shared or exclusive lock this loop will break out.  We're
	 * only talking about a very trivial edge case here.
	 */
	while (spin->counta == SPINLOCK_SHARED) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED, 0))
			break;
	}
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_lock_shared(struct spinlock *spin)
{
	spin_lock_shared_quick(mycpu, spin);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}
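
/*
 * Example (illustrative sketch only): readers take the shared form and
 * may run concurrently; writers take the exclusive form.  'mtx', 'val'
 * and 'v' are hypothetical names.
 *
 *	spin_lock_shared(&mtx);
 *	v = val;			(concurrent readers allowed)
 *	spin_unlock_shared(&mtx);
 *
 *	spin_lock(&mtx);
 *	val = v + 1;			(excludes readers and writers)
 *	spin_unlock(&mtx);
 */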

static __inline void
spin_pool_lock(void *chan)
{
	_spin_pool_lock(chan);
}

static __inline void
spin_pool_unlock(void *chan)
{
	_spin_pool_unlock(chan);
}
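
/*
 * Example (illustrative sketch only): pool spinlocks are keyed by an
 * arbitrary address, so no lock needs to be embedded in the object.
 * The same 'chan' pointer must be passed to lock and unlock; 'obj' is
 * a hypothetical object pointer.
 *
 *	spin_pool_lock(obj);
 *	...operate on *obj...
 *	spin_pool_unlock(obj);
 */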

static __inline void
spin_init(struct spinlock *spin)
{
	spin->counta = 0;
	spin->countb = 0;
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}
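
/*
 * Example (illustrative sketch only): lifecycle of an embedded
 * spinlock.  'struct softc' and 'sc' are hypothetical names.
 *
 *	struct softc {
 *		struct spinlock sc_spin;
 *	};
 *
 *	spin_init(&sc->sc_spin);	(before first use)
 *	...
 *	spin_uninit(&sc->sc_spin);	(when the object is destroyed)
 */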

#endif	/* _KERNEL */

#endif	/* _SYS_SPINLOCK2_H_ */