/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

#ifdef SMP

int spin_trylock_contested(struct spinlock *spin);
void spin_lock_contested(struct spinlock *spin);
void _spin_pool_lock(void *chan);
void _spin_pool_unlock(void *chan);

#endif

#ifdef SMP

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks_wr;
        if (atomic_swap_int(&spin->counta, 1))
                return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                                __builtin_return_address(0);
                        break;
                }
        }
#endif
        return (TRUE);
}

#else

static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
        globaldata_t gd = mycpu;

        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks_wr;
        return (TRUE);
}

#endif

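/*
 * Usage sketch (illustrative only, not part of this header).  The names
 * my_spin and my_count are hypothetical.  A FALSE return means the lock
 * was not acquired and no unlock is needed, so the caller simply falls
 * back or retries later:
 *
 *      if (spin_trylock(&my_spin)) {
 *              ++my_count;
 *              spin_unlock(&my_spin);
 *      } else {
 *              ...do other work and retry later...
 *      }
 */
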
/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
        return(spin->counta != 0);
}

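/*
 * Usage sketch (illustrative only).  Since spin_held() cannot identify
 * the owner, it is mainly useful for assertions; my_spin is a
 * hypothetical lock:
 *
 *      KKASSERT(spin_held(&my_spin));
 */
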
/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *spin)
{
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks_wr;
#ifdef SMP
        if (atomic_swap_int(&spin->counta, 1))
                spin_lock_contested(spin);
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 1;
                        gd->gd_curthread->td_spinlock_stack[i] = spin;
                        gd->gd_curthread->td_spinlock_caller_pc[i] =
                                __builtin_return_address(0);
                        break;
                }
        }
#endif
#endif
}

static __inline void
spin_lock(struct spinlock *spin)
{
        spin_lock_quick(mycpu, spin);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the spinlock
 * itself is cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef SMP
#ifdef DEBUG_LOCKS
        int i;
        for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
                if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
                    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
                        gd->gd_curthread->td_spinlock_stack_id[i] = 0;
                        gd->gd_curthread->td_spinlock_stack[i] = NULL;
                        gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
                        break;
                }
        }
#endif
        /*
         * Don't use a locked instruction here.  A plain store, bracketed
         * by store fences, is sufficient to release the lock.
         */
        KKASSERT(spin->counta != 0);
        cpu_sfence();
        spin->counta = 0;
        cpu_sfence();
#endif
        KKASSERT(gd->gd_spinlocks_wr > 0);
        --gd->gd_spinlocks_wr;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
#if 0
        if (__predict_false(gd->gd_reqflags & RQF_IDLECHECK_MASK))
                lwkt_maybe_splz(gd->gd_curthread);
#endif
}

static __inline void
spin_unlock(struct spinlock *spin)
{
        spin_unlock_quick(mycpu, spin);
}

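/*
 * Usage sketch (illustrative only, not part of this header).  A spinlock
 * protects a short, non-blocking critical section; the structure and
 * field names below are hypothetical:
 *
 *      struct mystruct {
 *              struct spinlock spin;
 *              int             count;
 *      };
 *
 *      spin_lock(&ms->spin);
 *      ++ms->count;
 *      spin_unlock(&ms->spin);
 */
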
static __inline void
spin_pool_lock(void *chan)
{
#ifdef SMP
        _spin_pool_lock(chan);
#else
        /* on UP the lock pointer is unused; see spin_lock_quick() */
        spin_lock(NULL);
#endif
}

static __inline void
spin_pool_unlock(void *chan)
{
#ifdef SMP
        _spin_pool_unlock(chan);
#else
        spin_unlock(NULL);
#endif
}

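/*
 * Usage sketch (illustrative only).  Pool spinlocks hash an arbitrary
 * address to one of a fixed set of shared spinlocks, so no per-object
 * lock storage is needed; lock and unlock must be passed the same
 * address.  "obj" is a hypothetical kernel object pointer:
 *
 *      spin_pool_lock(obj);
 *      ...short critical section on *obj...
 *      spin_pool_unlock(obj);
 */
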
static __inline void
spin_init(struct spinlock *spin)
{
        spin->counta = 0;
        spin->countb = 0;
}

static __inline void
spin_uninit(struct spinlock *spin)
{
        /* unused */
}

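/*
 * Usage sketch (illustrative only).  A spinlock must be zeroed via
 * spin_init() before first use; spin_uninit() is currently a no-op but
 * is called for symmetry.  "ms" is the hypothetical structure from the
 * sketch above:
 *
 *      spin_init(&ms->spin);
 *      ...
 *      spin_uninit(&ms->spin);
 */
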
#endif  /* _KERNEL */
#endif  /* _SYS_SPINLOCK2_H_ */