/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void spin_lock_contested(struct spinlock *spin);
void spin_lock_shared_contested2(struct spinlock *spin);
void _spin_pool_lock(void *chan);
void _spin_pool_unlock(void *chan);

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;
        if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
                return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                __builtin_return_address(0);
                        break;
                }
        }
#endif
        return (TRUE);
}
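
/*
 * Illustrative sketch (not part of this header's API surface): a typical
 * spin_trylock() pattern.  The spinlock name 'xlock' is hypothetical.
 *
 *        if (spin_trylock(&xlock)) {
 *                ... short, non-blocking work on the protected data ...
 *                spin_unlock(&xlock);
 *        } else {
 *                ... contended: defer the work or fall back to spin_lock() ...
 *        }
 */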

/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
        return(spin->counta != 0);
}
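
/*
 * Illustrative sketch: since spin_held() only reports that *someone* holds
 * the lock, it is mainly useful in assertions.  'xlock' is hypothetical.
 *
 *        KKASSERT(spin_held(&xlock));
 */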

/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *spin)
{
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;
        atomic_add_int(&spin->counta, 1);
        if (spin->counta != 1)
                spin_lock_contested(spin);
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                __builtin_return_address(0);
                        break;
                }
        }
#endif
}

static __inline void
spin_lock(struct spinlock *spin)
{
        spin_lock_quick(mycpu, spin);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
                    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 0;
                        gd->gd_curthread->td_spinlock_stack[i] = NULL;
                        gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
                        break;
                }
        }
#endif
        /*
         * Don't use a locked instruction here.  To reduce latency we avoid
         * reading spin->counta prior to writing to it.
         */
#ifdef DEBUG_LOCKS
        KKASSERT(spin->counta != 0);
#endif
        cpu_sfence();
        atomic_add_int(&spin->counta, -1);
        cpu_sfence();
#ifdef DEBUG_LOCKS
        KKASSERT(gd->gd_spinlocks > 0);
#endif
        --gd->gd_spinlocks;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
        spin_unlock_quick(mycpu, spin);
}
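
/*
 * Illustrative sketch: exclusive critical section.  Taking a spinlock bumps
 * the thread's critical-section count, so the protected region should be
 * short and must not block.  'xlock' is hypothetical.
 *
 *        spin_lock(&xlock);
 *        ... modify the protected data ...
 *        spin_unlock(&xlock);
 */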

/*
 * Shared spinlocks
 */
static __inline void
spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;
        if (atomic_cmpset_int(&spin->counta, 0, SPINLOCK_SHARED | 1) == 0)
                spin_lock_shared_contested2(spin);
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                __builtin_return_address(0);
                        break;
                }
        }
#endif
}

static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
                    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 0;
                        gd->gd_curthread->td_spinlock_stack[i] = NULL;
                        gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
                        break;
                }
        }
#endif
#ifdef DEBUG_LOCKS
        KKASSERT(spin->counta != 0);
#endif
        cpu_sfence();
        atomic_add_int(&spin->counta, -1);

        /*
         * Make sure SPINLOCK_SHARED is cleared.  If another cpu tries to
         * get a shared or exclusive lock this loop will break out.  We're
         * only talking about a very trivial edge case here.
         */
        while (spin->counta == SPINLOCK_SHARED) {
                if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED, 0))
                        break;
        }
        cpu_sfence();
#ifdef DEBUG_LOCKS
        KKASSERT(gd->gd_spinlocks > 0);
#endif
        --gd->gd_spinlocks;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
}

static __inline void
spin_lock_shared(struct spinlock *spin)
{
        spin_lock_shared_quick(mycpu, spin);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
        spin_unlock_shared_quick(mycpu, spin);
}
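
/*
 * Illustrative sketch: read-mostly consumers can take the shared form while
 * writers use the exclusive form above.  'xlock' is hypothetical.
 *
 *        spin_lock_shared(&xlock);
 *        ... read the protected data ...
 *        spin_unlock_shared(&xlock);
 */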

static __inline void
spin_pool_lock(void *chan)
{
        _spin_pool_lock(chan);
}

static __inline void
spin_pool_unlock(void *chan)
{
        _spin_pool_unlock(chan);
}
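
/*
 * Illustrative sketch: the pool forms key a spinlock off the channel
 * address, so callers do not need to embed a lock in every object.
 * 'obj' is a hypothetical pointer used only as the key; lock and unlock
 * must be called with the same address.
 *
 *        spin_pool_lock(obj);
 *        ... brief work associated with obj ...
 *        spin_pool_unlock(obj);
 */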

static __inline void
spin_init(struct spinlock *spin)
{
        spin->counta = 0;
        spin->countb = 0;
}

static __inline void
spin_uninit(struct spinlock *spin)
{
        /* unused */
}
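
/*
 * Illustrative sketch: typical lifecycle.  The structure 'softc' and its
 * field 'sc_spin' are hypothetical.
 *
 *        spin_init(&softc->sc_spin);
 *        ...
 *        spin_lock(&softc->sc_spin);
 *        ... use the structure ...
 *        spin_unlock(&softc->sc_spin);
 *        ...
 *        spin_uninit(&softc->sc_spin);        (currently a no-op)
 */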

#endif  /* _KERNEL */
#endif  /* _SYS_SPINLOCK2_H_ */