kernel: Make SMP support default (and non-optional).
sys/sys/spinlock2.h
/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void spin_lock_contested(struct spinlock *spin);
void spin_lock_shared_contested(struct spinlock *spin);
void _spin_pool_lock(void *chan);
void _spin_pool_unlock(void *chan);

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}

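/*
 * Example use of spin_trylock() (illustrative sketch only; "gate" is a
 * hypothetical spinlock, not something defined by this header):
 *
 *	if (spin_trylock(&gate)) {
 *		... lock acquired exclusively, do short non-blocking work ...
 *		spin_unlock(&gate);
 *	} else {
 *		... lock was busy, take a fallback path; do not call
 *		... spin_unlock() for a failed attempt.
 *	}
 */
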
/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return(spin->counta != 0);
}

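/*
 * spin_held() is typically used for sanity assertions in code that expects
 * the lock to already be held, e.g. (illustrative sketch only; "map" is a
 * hypothetical structure containing a spinlock):
 *
 *	KKASSERT(spin_held(&map->spin));
 */
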
/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *spin)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	atomic_add_int(&spin->counta, 1);
	if (spin->counta != 1)
		spin_lock_contested(spin);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
spin_lock(struct spinlock *spin)
{
	spin_lock_quick(mycpu, spin);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the lock is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->counta prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}

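/*
 * Typical exclusive lock/unlock pairing (illustrative sketch only;
 * "struct mystruct" and its fields are hypothetical, not part of this
 * header):
 *
 *	struct mystruct {
 *		struct spinlock	spin;
 *		int		count;
 *	};
 *
 *	spin_lock(&ms->spin);		(also enters a critical section)
 *	++ms->count;			(keep the held section short)
 *	spin_unlock(&ms->spin);
 *
 * Spinlocks busy-wait, so code between lock and unlock must be brief and
 * must not block.
 */
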
/*
 * Shared spinlocks
 */
static __inline void
spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	atomic_add_int(&spin->counta, 1);
	if (spin->counta == 1)
		atomic_set_int(&spin->counta, SPINLOCK_SHARED);
	if ((spin->counta & SPINLOCK_SHARED) == 0)
		spin_lock_shared_contested(spin);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

	/*
	 * Make sure SPINLOCK_SHARED is cleared.  If another cpu tries to
	 * get a shared or exclusive lock this loop will break out.  We're
	 * only talking about a very trivial edge case here.
	 */
	while (spin->counta == SPINLOCK_SHARED) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED, 0))
			break;
	}
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_lock_shared(struct spinlock *spin)
{
	spin_lock_shared_quick(mycpu, spin);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}

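/*
 * Typical shared vs. exclusive usage (illustrative sketch only; "ms" and
 * its fields are hypothetical):
 *
 *	Read path, may run concurrently on multiple cpus:
 *		spin_lock_shared(&ms->spin);
 *		v = ms->value;
 *		spin_unlock_shared(&ms->spin);
 *
 *	Write path, excludes shared and exclusive holders alike:
 *		spin_lock(&ms->spin);
 *		ms->value = v;
 *		spin_unlock(&ms->spin);
 */
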
static __inline void
spin_pool_lock(void *chan)
{
	_spin_pool_lock(chan);
}

static __inline void
spin_pool_unlock(void *chan)
{
	_spin_pool_unlock(chan);
}

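/*
 * Pool spinlocks hash the supplied address onto a small set of global
 * spinlocks, so a structure does not need to embed its own lock
 * (illustrative sketch only; "obj" is a hypothetical pointer).  The same
 * address must be passed to the lock and unlock calls:
 *
 *	spin_pool_lock(obj);
 *	... brief manipulation of *obj ...
 *	spin_pool_unlock(obj);
 */
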
static __inline void
spin_init(struct spinlock *spin)
{
	spin->counta = 0;
	spin->countb = 0;
}

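/*
 * Illustrative initialization sketch only ("struct mystruct", "ms" and the
 * M_TEMP malloc type are hypothetical here, not mandated by this header):
 *
 *	struct mystruct *ms;
 *
 *	ms = kmalloc(sizeof(*ms), M_TEMP, M_WAITOK | M_ZERO);
 *	spin_init(&ms->spin);
 *	...
 *	spin_uninit(&ms->spin);
 *	kfree(ms, M_TEMP);
 */
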
static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}

#endif	/* _KERNEL */
#endif	/* _SYS_SPINLOCK2_H_ */