2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of The DragonFly Project nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific, prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $DragonFly: src/sys/sys/spinlock2.h,v 1.8 2006/05/21 03:43:47 dillon Exp $
35 #ifndef _SYS_SPINLOCK2_H_
36 #define _SYS_SPINLOCK2_H_
40 #error "This file should not be included by userland programs."
44 #ifndef _SYS_THREAD2_H_
45 #include <sys/thread2.h>
47 #ifndef _MACHINE_ATOMIC_H_
48 #include <machine/atomic.h>
50 #ifndef _MACHINE_CPUFUNC_H_
51 #include <machine/cpufunc.h>
/*
 * Adjust the per-thread count of held spinlocks by "count"
 * (+1 on acquire, -1 on release).  The unlock paths use this
 * counter to panic on unbalanced lock/unlock sequences.
 * NOTE(review): the storage-class/return-type line and braces of
 * this function are outside the visible chunk.
 */
spin_lock_debug(thread_t td, int count)
	td->td_spinlocks += count;
67 * Attempt to obtain a spinlock on behalf of the specified thread. Returns
68 * FALSE on failure, TRUE on success.
/*
 * Try to acquire mtx for thread td without blocking.
 * atomic_swap_int() atomically exchanges 1 into mtx->lock and returns
 * the previous value; a previous value of 0 means the lock was free
 * and is now held, so the debug spinlock count is bumped.
 * NOTE(review): the TRUE/FALSE return statements and closing braces
 * are outside the visible chunk.
 */
static __inline boolean_t
spin_trylock(thread_t td, struct spinlock *mtx)
	if (atomic_swap_int(&mtx->lock, 1) == 0) {
		spin_lock_debug(td, 1);
 * Release a spinlock obtained via spin_trylock() on behalf of the specified
84 * thread. This function always succeeds. It exists because the other
85 * standard release functions only operate on the current thread.
/*
 * Release mtx on behalf of td (counterpart of spin_trylock()).
 * Panics if td holds no spinlocks, which would indicate an
 * unbalanced unlock.  The lock is released with a plain store of 0
 * rather than a bus-locked atomic operation.
 * NOTE(review): the storage-class/return-type line and braces are
 * outside the visible chunk.
 */
spin_tryunlock(thread_t td, struct spinlock *mtx)
	/* catch lock/unlock imbalance before corrupting the count */
	if (td->td_spinlocks <= 0)
		panic("spin_tryunlock: wasn't locked!");
	spin_lock_debug(td, -1);
	mtx->lock = 0;	/* non-bus-locked lock release */
99 extern void spin_lock_contested(struct spinlock *mtx);
102 * The quick versions should be used only if you are already
103 * in a critical section or you know the spinlock will never
 * be used by a hard interrupt, IPI, or soft interrupt.
106 * Obtain a spinlock and return.
/*
 * Acquire mtx for the current thread without entering a critical
 * section (caller must already be protected against preemption by
 * interrupts/IPIs -- see the comment above).  The debug count is
 * bumped first; if the atomic swap finds the lock already held
 * (previous value non-zero) we fall into the contested slow path.
 * NOTE(review): storage-class/return-type line and braces are
 * outside the visible chunk.
 */
spin_lock_quick(struct spinlock *mtx)
	spin_lock_debug(curthread, 1);
	if (atomic_swap_int(&mtx->lock, 1) != 0)
		spin_lock_contested(mtx);	/* slow path */
119 * Release a spinlock previously obtained by the current thread.
/*
 * Release a spinlock previously obtained by the current thread via
 * spin_lock_quick().  Panics on an unbalanced unlock.  The release
 * is a plain store of 0, not a bus-locked atomic operation.
 * NOTE(review): storage-class/return-type line and braces are
 * outside the visible chunk.
 */
spin_unlock_quick(struct spinlock *mtx)
	/* catch lock/unlock imbalance before corrupting the count */
	if (curthread->td_spinlocks <= 0)
		panic("spin_unlock_quick: wasn't locked!");
	spin_lock_debug(curthread, -1);
	mtx->lock = 0;	/* non-bus-locked lock release */
134 * Returns whether a spinlock is locked or not. 0 indicates not locked,
135 * non-zero indicates locked (by any thread, not necessarily the current
/*
 * Test whether mtx is currently held (by any thread, not necessarily
 * the caller).  NOTE(review): the function body is outside the
 * visible chunk.
 */
static __inline boolean_t
spin_is_locked(struct spinlock *mtx)
/*
 * Initialize mtx to the unlocked state.  NOTE(review): the
 * storage-class line and body are outside the visible chunk.
 */
spin_init(struct spinlock *mtx)
/*
 * Tear down mtx.  NOTE(review): the storage-class line and body are
 * outside the visible chunk.
 */
spin_uninit(struct spinlock *mtx)
159 * There is no spin_trylock(), spin_tryunlock(), or spin_is_locked()
160 * for UP builds. These functions are used by the kernel only in
161 * situations where the spinlock actually has to work.
163 * We provide the rest of the calls for UP as degenerate inlines (note
164 * that the non-quick versions still obtain/release a critical section!).
165 * This way we don't have to have a billion #ifdef's floating around
166 * the rest of the kernel.
/*
 * UP (non-SMP) degenerate versions.  With a single CPU there is no
 * cross-CPU contention to guard against, so these inlines compile
 * away to nothing (the non-quick spin_lock()/spin_unlock() wrappers
 * still provide the critical section).
 */
static __inline void
spin_lock_quick(struct spinlock *mtx)
{
}

static __inline void
spin_unlock_quick(struct spinlock *mtx)
{
}

static __inline void
spin_init(struct spinlock *mtx)
{
}
176 * The normal spin_lock() API automatically enters and exits a
177 * critical section, preventing deadlocks from interrupt preemption
178 * if the interrupt thread accesses the same spinlock.
/*
 * Acquire mtx, entering a critical section first so an interrupt
 * thread on this cpu cannot preempt us and deadlock on the same
 * spinlock (see the comment above).
 * NOTE(review): the storage-class/return-type line and braces are
 * outside the visible chunk.
 */
spin_lock(struct spinlock *mtx)
	crit_enter_id("spin");
	spin_lock_quick(mtx);
/*
 * Release mtx and leave the critical section entered by spin_lock().
 * The operations are performed in the reverse order of spin_lock():
 * the lock is dropped before the critical section is exited.
 * NOTE(review): the storage-class/return-type line and braces are
 * outside the visible chunk.
 */
spin_unlock(struct spinlock *mtx)
	spin_unlock_quick(mtx);
	crit_exit_id("spin");
195 #endif /* _SYS_SPINLOCK2_H_ */