/*
 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void spin_lock_contested(struct spinlock *spin);
void spin_lock_shared_contested(struct spinlock *spin);
void _spin_pool_lock(void *chan);
void _spin_pool_unlock(void *chan);
/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}
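
/*
 * Illustrative use of spin_trylock() (sketch only; "foo_spin" and
 * "foo_stats" are hypothetical names, not part of this API).  On
 * failure the caller simply avoids blocking and retries later:
 *
 *	if (spin_trylock(&foo_spin)) {
 *		++foo_stats.hits;
 *		spin_unlock(&foo_spin);
 *	} else {
 *		++foo_stats.misses;
 *	}
 */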

/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though).
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return(spin->counta != 0);
}
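
/*
 * spin_held() is mainly useful for assertions, e.g. (with a
 * hypothetical "foo_spin"):
 *
 *	KKASSERT(spin_held(&foo_spin));
 *
 * It cannot tell whether the current thread is the holder.
 */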

/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
spin_lock_quick(globaldata_t gd, struct spinlock *spin)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
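
	/*
	 * Uncontested case: our increment takes counta from 0 to 1.
	 * Any other result means another holder or contender (or the
	 * SPINLOCK_SHARED bit) is present and we must go through the
	 * contested path.
	 */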
	atomic_add_int(&spin->counta, 1);
	if (spin->counta != 1)
		spin_lock_contested(spin);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
spin_lock(struct spinlock *spin)
{
	spin_lock_quick(mycpu, spin);
}

/*
 * Release an exclusive spinlock.  We can do this passively, only
 * ensuring that our per-cpu spinlock count and critical section are
 * left intact until the spinlock itself has been cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * The decrement must be atomic because other cpus may be
	 * adjusting counta concurrently (contenders and shared
	 * holders), but to reduce latency we avoid reading
	 * spin->counta prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}
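
/*
 * Typical exclusive usage (sketch; "foo_spin", "foo_list" and "elm"
 * are hypothetical names, not part of this API).  A spinlock holder
 * is inside a critical section and must not block or sleep:
 *
 *	spin_lock(&foo_spin);
 *	TAILQ_INSERT_TAIL(&foo_list, elm, entry);
 *	spin_unlock(&foo_spin);
 */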

/*
 * Shared spinlocks
 */
static __inline void
spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
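
	/*
	 * Add our count.  If we appear to be the only holder the lock
	 * was free; mark it with SPINLOCK_SHARED.  If the
	 * SPINLOCK_SHARED bit is not set at this point we could not
	 * confirm a pure shared acquisition (an exclusive holder or a
	 * race is involved) and must fall into the contested path.
	 */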
	atomic_add_int(&spin->counta, 1);
	if (spin->counta == 1)
		atomic_set_int(&spin->counta, SPINLOCK_SHARED);
	if ((spin->counta & SPINLOCK_SHARED) == 0)
		spin_lock_shared_contested(spin);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
			    __builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

	/*
	 * Make sure SPINLOCK_SHARED is cleared once the last shared
	 * holder has released.  If another cpu starts acquiring a
	 * shared or exclusive lock in the meantime, counta changes and
	 * this loop simply exits; it only covers a very narrow edge
	 * case.
	 */
	while (spin->counta == SPINLOCK_SHARED) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED, 0))
			break;
	}
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_lock_shared(struct spinlock *spin)
{
	spin_lock_shared_quick(mycpu, spin);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}
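
/*
 * Shared (read) usage sketch; "foo_spin", "foo_table", "v" and "i"
 * are hypothetical names.  Multiple readers may hold the lock at the
 * same time, while writers use the exclusive spin_lock()/spin_unlock()
 * API and exclude all shared holders:
 *
 *	spin_lock_shared(&foo_spin);
 *	v = foo_table[i];
 *	spin_unlock_shared(&foo_spin);
 */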

static __inline void
spin_pool_lock(void *chan)
{
	_spin_pool_lock(chan);
}

static __inline void
spin_pool_unlock(void *chan)
{
	_spin_pool_unlock(chan);
}
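
/*
 * Pool spinlocks key a lock to an arbitrary address, so small or
 * numerous structures need not embed their own spinlock.  The same
 * address must be passed to lock and unlock.  Illustrative sketch
 * ("obj" is a hypothetical object pointer, not part of this API):
 *
 *	spin_pool_lock(obj);
 *	obj->ref_count++;
 *	spin_pool_unlock(obj);
 */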

static __inline void
spin_init(struct spinlock *spin)
{
	spin->counta = 0;
	spin->countb = 0;
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}
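
/*
 * Initialization sketch for a spinlock embedded in a hypothetical
 * structure (neither "struct foo" nor "foo_init" are part of this
 * API):
 *
 *	struct foo {
 *		struct spinlock	fo_spin;
 *		int		fo_count;
 *	};
 *
 *	static void
 *	foo_init(struct foo *fo)
 *	{
 *		spin_init(&fo->fo_spin);
 *		fo->fo_count = 0;
 *	}
 */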

#endif	/* _KERNEL */
#endif	/* _SYS_SPINLOCK2_H_ */