/* sys/sys/spinlock2.h */
1/*
2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Jeffrey M. Hsu.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of The DragonFly Project nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific, prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
 * $DragonFly: src/sys/sys/spinlock2.h,v 1.8 2006/05/21 03:43:47 dillon Exp $
33 */
34
35#ifndef _SYS_SPINLOCK2_H_
36#define _SYS_SPINLOCK2_H_
37
03d6a592
MD
38#ifndef _KERNEL
39
40#error "This file should not be included by userland programs."
41
42#else
43
44#ifndef _SYS_THREAD2_H_
35a832df 45#include <sys/thread2.h>
03d6a592
MD
46#endif
47#ifndef _MACHINE_ATOMIC_H_
35a832df 48#include <machine/atomic.h>
03d6a592
MD
49#endif
50#ifndef _MACHINE_CPUFUNC_H_
35a832df 51#include <machine/cpufunc.h>
03d6a592 52#endif
35a832df
MD
53
54#ifdef SMP
55
9d265729
MD
56#ifdef INVARIANTS
57
69d78e99 58static __inline void
9d265729 59spin_lock_debug(thread_t td, int count)
69d78e99 60{
9d265729 61 td->td_spinlocks += count;
69d78e99
MD
62}
63
9d265729
MD
64#endif
65
66/*
67 * Attempt to obtain a spinlock on behalf of the specified thread. Returns
68 * FALSE on failure, TRUE on success.
69 */
35a832df 70static __inline boolean_t
9d265729 71spin_trylock(thread_t td, struct spinlock *mtx)
35a832df 72{
69d78e99 73 if (atomic_swap_int(&mtx->lock, 1) == 0) {
9d265729
MD
74#ifdef INVARIANTS
75 spin_lock_debug(td, 1);
76#endif
35a832df 77 return (TRUE);
69d78e99 78 }
35a832df
MD
79 return (FALSE);
80}
81
9d265729
MD
82/*
83 * Relase a spinlock obtained via spin_trylock() on behalf of the specified
84 * thread. This function always succeeds. It exists because the other
85 * standard release functions only operate on the current thread.
86 */
87static __inline void
88spin_tryunlock(thread_t td, struct spinlock *mtx)
89{
90#ifdef INVARIANTS
91 if (td->td_spinlocks <= 0)
92 panic("spin_tryunlock: wasn't locked!");
93 spin_lock_debug(td, -1);
94#endif
95 cpu_sfence();
96 mtx->lock = 0; /* non-bus-locked lock release */
97}
98
35a832df
MD
99extern void spin_lock_contested(struct spinlock *mtx);
100
69d78e99
MD
101/*
102 * The quick versions should be used only if you are already
103 * in a critical section or you know the spinlock will never
9d265729
MD
104 * be used by an hard interrupt, IPI, or soft interrupt.
105 *
106 * Obtain a spinlock and return.
69d78e99 107 */
35a832df 108static __inline void
69d78e99 109spin_lock_quick(struct spinlock *mtx)
35a832df 110{
9d265729
MD
111#ifdef INVARIANTS
112 spin_lock_debug(curthread, 1);
113#endif
35a832df
MD
114 if (atomic_swap_int(&mtx->lock, 1) != 0)
115 spin_lock_contested(mtx); /* slow path */
116}
117
9d265729
MD
118/*
119 * Release a spinlock previously obtained by the current thread.
120 */
35a832df 121static __inline void
69d78e99 122spin_unlock_quick(struct spinlock *mtx)
35a832df 123{
9d265729
MD
124#ifdef INVARIANTS
125 if (curthread->td_spinlocks <= 0)
126 panic("spin_unlock_quick: wasn't locked!");
127 spin_lock_debug(curthread, -1);
128#endif
35a832df
MD
129 cpu_sfence();
130 mtx->lock = 0; /* non-bus-locked lock release */
131}
132
9d265729
MD
133/*
134 * Returns whether a spinlock is locked or not. 0 indicates not locked,
135 * non-zero indicates locked (by any thread, not necessarily the current
136 * thread).
137 */
35a832df
MD
138static __inline boolean_t
139spin_is_locked(struct spinlock *mtx)
140{
141 return (mtx->lock);
142}
143
/*
 * Initialize a spinlock to the unlocked state.
 */
static __inline void
spin_init(struct spinlock *mtx)
{
	mtx->lock = 0;
}
149
503a0d52
MD
/*
 * Tear down a spinlock.  No cleanup is currently required; the function
 * exists for API symmetry with spin_init().
 */
static __inline void
spin_uninit(struct spinlock *mtx)
{
	/* unused */
}
155
35a832df
MD
156#else /* SMP */
157
dd55d707
MD
158/*
159 * There is no spin_trylock(), spin_tryunlock(), or spin_is_locked()
160 * for UP builds. These functions are used by the kernel only in
161 * situations where the spinlock actually has to work.
162 *
163 * We provide the rest of the calls for UP as degenerate inlines (note
164 * that the non-quick versions still obtain/release a critical section!).
165 * This way we don't have to have a billion #ifdef's floating around
166 * the rest of the kernel.
167 */
35a832df 168
a2323cfa
YT
169static __inline void spin_lock_quick(struct spinlock *mtx) { }
170static __inline void spin_unlock_quick(struct spinlock *mtx) { }
35a832df
MD
171static __inline void spin_init(struct spinlock *mtx) { }
172
173#endif /* SMP */
174
69d78e99
MD
175/*
176 * The normal spin_lock() API automatically enters and exits a
177 * critical section, preventing deadlocks from interrupt preemption
178 * if the interrupt thread accesses the same spinlock.
179 */
35a832df 180static __inline void
69d78e99 181spin_lock(struct spinlock *mtx)
35a832df 182{
16523a43 183 crit_enter_id("spin");
69d78e99 184 spin_lock_quick(mtx);
35a832df
MD
185}
186
/*
 * Release a spinlock obtained via spin_lock() and exit the critical
 * section that spin_lock() entered.
 */
static __inline void
spin_unlock(struct spinlock *mtx)
{
	spin_unlock_quick(mtx);
	crit_exit_id("spin");	/* re-enable preemption after release */
}
193
03d6a592
MD
194#endif /* _KERNEL */
195#endif /* _SYS_SPINLOCK2_H_ */
35a832df 196