/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int	spin_trylock_contested(struct spinlock *spin);
void	_spin_lock_contested(struct spinlock *spin, const char *ident, int count);
void	_spin_lock_shared_contested(struct spinlock *spin, const char *ident);

#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(spin)		_spin_lock_quick(spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(spin)	_spin_lock_shared_quick(spin, __func__)

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();
	if (atomic_cmpset_int(&spin->lock, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}

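/*
 * Illustrative sketch, not part of the original header: a typical
 * spin_trylock() caller falls back to other work rather than spinning.
 * The lock name 'foo_spin' is hypothetical.
 *
 *	if (spin_trylock(&foo_spin)) {
 *		...short, non-blocking critical section...
 *		spin_unlock(&foo_spin);
 *	} else {
 *		...defer the work or take a slower locked path...
 *	}
 */
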
/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return((spin->lock & ~SPINLOCK_SHARED) != 0);
}

/*
 * Obtain an exclusive spinlock and return.  It is possible for the
 * SPINLOCK_SHARED bit to already be set, in which case the contested
 * code is called to fix it up.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	int count;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	count = atomic_fetchadd_int(&spin->lock, 1);
	if (__predict_false(count != 0)) {
		_spin_lock_contested(spin, ident, count);
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 *
 * NOTE: Actually works for shared OR exclusive spinlocks.  spin_unlock_any()
 *	 relies on this.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->lock prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->lock != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->lock, -1);
	cpu_ccfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}

static __inline void
spin_unlock_any(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}

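/*
 * Illustrative sketch, assumed usage with hypothetical names: an
 * exclusive spinlock is typically embedded in the structure it
 * protects and held only across short, non-blocking sections.
 *
 *	struct foo {
 *		struct spinlock	spin;
 *		int		count;
 *	};
 *
 *	spin_lock(&fp->spin);
 *	++fp->count;
 *	spin_unlock(&fp->spin);
 */
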
/*
 * Shared spinlock.  Acquire a count; if SPINLOCK_SHARED is not already
 * set, try a trivial conversion and drop into the contested code if
 * the trivial conversion fails.  The SHARED bit is 'cached' when lock
 * counts go to 0 so the critical path is typically just the fetchadd.
 *
 * WARNING! Due to the way exclusive conflict resolution works, we cannot
 *	    just unconditionally set the SHARED bit on previous-count == 0.
 *	    Doing so will interfere with the exclusive contended code.
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	int lock;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	lock = atomic_fetchadd_int(&spin->lock, 1);
	if (__predict_false((lock & SPINLOCK_SHARED) == 0)) {
		if (lock != 0 ||
		    !atomic_cmpset_int(&spin->lock, 1, SPINLOCK_SHARED | 1)) {
			_spin_lock_shared_contested(spin, ident);
		}
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

/*
 * Unlock a shared lock.  For convenience we allow the last transition
 * to be to (SPINLOCK_SHARED|0), leaving the SPINLOCK_SHARED bit set
 * with a count of 0 which will optimize the next shared lock obtained.
 *
 * WARNING! In order to implement shared and exclusive spinlocks, an
 *	    exclusive request will convert a multiply-held shared lock
 *	    to exclusive and wait for shared holders to unlock.  So keep
 *	    in mind that as of now the spinlock could actually be in an
 *	    exclusive state.
 */
static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->lock != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->lock, -1);
	cpu_ccfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);
}

static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}

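/*
 * Illustrative sketch, hypothetical names: readers use the shared
 * form against the same spinlock that writers take exclusively.
 *
 *	spin_lock_shared(&fp->spin);
 *	v = fp->count;			(read-only access)
 *	spin_unlock_shared(&fp->spin);
 */
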
/*
 * Attempt to upgrade a shared spinlock to exclusive.  Return non-zero
 * on success, 0 on failure.
 */
static __inline int
spin_lock_upgrade_try(struct spinlock *spin)
{
	if (atomic_cmpset_int(&spin->lock, SPINLOCK_SHARED|1, 1))
		return 1;
	else
		return 0;
}

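/*
 * Illustrative sketch, assumed pattern with hypothetical names: if
 * the trivial upgrade fails, drop the shared lock and reacquire
 * exclusively, then revalidate any state examined under the shared
 * lock since other threads may have intervened.
 *
 *	spin_lock_shared(&fp->spin);
 *	if (!spin_lock_upgrade_try(&fp->spin)) {
 *		spin_unlock_shared(&fp->spin);
 *		spin_lock(&fp->spin);
 *		(revalidate state here)
 *	}
 *	...exclusive access...
 *	spin_unlock(&fp->spin);
 */
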
static __inline void
spin_init(struct spinlock *spin, const char *descr __unused)
{
	spin->lock = 0;
	spin->update = 0;
#if 0
	spin->descr = descr;
#endif
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}

/*
 * SMP friendly update counter support.  Allows protected structures to
 * be accessed and retried without dirtying the cache line.  Retries if
 * modified, gains shared spin-lock if modification is underway.
 *
 * The returned value from spin_access_start() must be passed into
 * spin_access_end().
 */
static __inline int
spin_access_start(struct spinlock *spin)
{
	int v;

	v = *(volatile int *)&spin->update;
	cpu_lfence();
	if (__predict_false(v & 1))
		spin_lock_shared(spin);
	return v;
}

static __inline int
spin_access_end(struct spinlock *spin, int v)
{
	if (__predict_false(v & 1)) {
		spin_unlock_shared(spin);
		return 0;
	}
	cpu_lfence();
	return(*(volatile int *)&spin->update != v);
}

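/*
 * Illustrative sketch, assumed usage with hypothetical names: a
 * lockless read loop over a structure protected by the update
 * counter, retrying when spin_access_end() reports a modification
 * raced the read.
 *
 *	do {
 *		v = spin_access_start(&fp->spin);
 *		copy = fp->data;
 *	} while (spin_access_end(&fp->spin, v));
 */
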
static __inline void
spin_lock_update(struct spinlock *spin)
{
	spin_lock(spin);
	atomic_add_int_nonlocked(&spin->update, 1);
	cpu_sfence();
	KKASSERT_UNSPIN((spin->update & 1), spin);
}

static __inline void
spin_unlock_update(struct spinlock *spin)
{
	cpu_sfence();
	atomic_add_int_nonlocked(&spin->update, 1);
	KKASSERT_UNSPIN(((spin->update & 1) == 0), spin);
	spin_unlock(spin);
}

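/*
 * Illustrative sketch, hypothetical names: the writer brackets its
 * modifications with spin_lock_update()/spin_unlock_update(), which
 * leaves the update counter odd while the change is in progress so
 * concurrent spin_access_start() callers fall back to the shared lock.
 *
 *	spin_lock_update(&fp->spin);
 *	fp->data = new_data;
 *	spin_unlock_update(&fp->spin);
 */
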
/*
 * API that doesn't integrate the acquisition of the spin-lock
 */
static __inline int
spin_access_start_only(struct spinlock *spin)
{
	int v;

	v = *(volatile int *)&spin->update;
	cpu_lfence();

	return v;
}

static __inline int
spin_access_check_inprog(int v)
{
	return (v & 1);
}

static __inline int
spin_access_end_only(struct spinlock *spin, int v)
{
	cpu_lfence();
	return(*(volatile int *)&spin->update != v);
}

static __inline void
spin_lock_update_only(struct spinlock *spin)
{
	atomic_add_int_nonlocked(&spin->update, 1);
	cpu_sfence();
	KKASSERT(spin->update & 1);
}

static __inline void
spin_unlock_update_only(struct spinlock *spin)
{
	cpu_sfence();
	atomic_add_int_nonlocked(&spin->update, 1);
	KKASSERT((spin->update & 1) == 0);
}

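/*
 * Illustrative sketch, assumed usage with hypothetical names: with
 * the *_only API the caller supplies its own fallback serialization
 * (here a hypothetical external lock) instead of the integrated
 * shared spinlock.
 *
 *	v = spin_access_start_only(&fp->spin);
 *	if (spin_access_check_inprog(v)) {
 *		(acquire the external lock, read, release)
 *	} else {
 *		copy = fp->data;
 *		if (spin_access_end_only(&fp->spin, v))
 *			(modification raced; retry or fall back)
 *	}
 */
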
#endif	/* _SYS_SPINLOCK2_H_ */