/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

static __inline void
mtx_init_flags(mtx_t *mtx, const char *ident, uint32_t flags)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = flags;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

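/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical softc structure embedding a mutex and initializing it
 * once during setup.  The structure and ident string are invented.
 *
 *	struct my_softc {
 *		mtx_t	sc_mtx;
 *		int	sc_refs;
 *	};
 *
 *	static void
 *	my_softc_ctor(struct my_softc *sc)
 *	{
 *		mtx_init(&sc->sc_mtx, "myscmtx");
 *		sc->sc_refs = 0;
 *	}
 */
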
/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
	link->state = MTX_LINK_IDLE;
	link->callback = NULL;
	link->arg = NULL;
}

/*
 * A link structure initialized this way causes mutex operations not to
 * block; the caller must specify a callback.  The caller may still abort
 * the mutex operation via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
		    void (*callback)(mtx_link_t *link, void *arg, int error),
		    void *arg)
{
	link->state = MTX_LINK_IDLE;
	link->callback = callback;
	link->arg = arg;
}

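/*
 * Example (illustrative sketch): a completion callback with the
 * signature expected by mtx_link_init_async().  Here error is assumed
 * to be 0 when the lock was acquired and non-zero when the attempt
 * failed or was aborted; the softc and helper names are invented.
 *
 *	static void
 *	my_lock_done(mtx_link_t *link, void *arg, int error)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		if (error == 0)
 *			my_softc_start(sc);
 *		else
 *			my_softc_fail(sc, error);
 *	}
 *
 *	mtx_link_init_async(&sc->sc_link, my_lock_done, sc);
 */
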
/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
	/* empty */
}

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility into the link structure, which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, flags, to));
	mtx->mtx_owner = curthread;
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}

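/*
 * Example (illustrative sketch): an abortable blocking exclusive lock
 * via an explicit link.  Another thread with visibility into the link
 * may call mtx_abort_ex_link() to break the waiter out, in which case
 * a non-zero error is returned and the lock is not held.
 *
 *	mtx_link_t link;
 *	int error;
 *
 *	mtx_link_init(&link);
 *	error = mtx_lock_ex_link(&sc->sc_mtx, &link, 0, 0);
 *	if (error == 0) {
 *		... critical section ...
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */
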
/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}

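/*
 * Example (illustrative sketch): the common short-form pairing.  Code
 * that locks with mtx_lock() should unlock with mtx_unlock().
 *
 *	mtx_lock(&sc->sc_mtx);
 *	++sc->sc_refs;
 *	mtx_unlock(&sc->sc_mtx);
 */
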
/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

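/*
 * Example (illustrative sketch): an interruptible exclusive lock.
 * With PCATCH specified the sleep may be broken by a signal, in which
 * case the tsleep() error (e.g. EINTR or ERESTART) is returned and the
 * lock is not held.
 *
 *	error = mtx_lock_ex(&sc->sc_mtx, PCATCH, 0);
 *	if (error)
 *		return(error);
 *	... critical section ...
 *	mtx_unlock(&sc->sc_mtx);
 */
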
static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_link(mtx, link, flags, to));
	link->state = MTX_LINK_ACQUIRED;
	return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, flags, to));
	return(0);
}

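/*
 * Example (illustrative sketch): a read-mostly path.  Multiple readers
 * may hold the shared lock concurrently; without PCATCH in the flags
 * the call cannot fail.
 *
 *	mtx_lock_sh(&sc->sc_mtx, 0, 0);
 *	val = sc->sc_refs;
 *	mtx_unlock(&sc->sc_mtx);
 */
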
static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx));
	return(0);
}

/*
 * Add a shared lock reference to a lock already locked shared.  Does
 * not block on a pending exclusive request.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
		 (mtx->mtx_lock & MTX_MASK) > 0);
	atomic_add_int(&mtx->mtx_lock, 1);
}

/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_quick(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there's no point conditionalizing it as a
	 * conditional will be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}

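/*
 * Example (illustrative sketch): spin-locking a very short critical
 * section.  The spin forms hold a hard critical section, so the code
 * between lock and unlock must not block or sleep.  The flag field is
 * invented.
 *
 *	mtx_spinlock(&sc->sc_mtx);
 *	sc->sc_flags |= MY_SC_DIRTY;
 *	mtx_spinunlock(&sc->sc_mtx);
 */
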
static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_quick(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, call _mtx_spinlock_try().  This
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;
	return (0);
}

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

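/*
 * Example (illustrative sketch): opportunistic locking with a fallback
 * instead of blocking.
 *
 *	if (mtx_lock_ex_try(&sc->sc_mtx) == 0) {
 *		... fast path, lock held ...
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		... defer or queue the work ...
 *	}
 */
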
/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) &&
		 mtx->mtx_owner == gd->gd_curthread);
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}

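/*
 * Example (illustrative sketch): modify under the exclusive lock, then
 * downgrade to shared so other readers may proceed while this thread
 * keeps read access.  The helper names are invented.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_refs = my_recompute(sc);
 *	mtx_downgrade(&sc->sc_mtx);
 *	my_consume(sc->sc_refs);
 *	mtx_unlock(&sc->sc_mtx);
 */
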
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = curthread;
		return(0);
	}
	return (_mtx_upgrade_try(mtx));
}

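/*
 * Example (illustrative sketch): try to upgrade, and on EDEADLK fall
 * back to dropping the shared lock and relocking exclusively.  Any
 * state examined under the shared lock must be revalidated after the
 * relock because the lock was released in between.
 *
 *	if (mtx_upgrade_try(&sc->sc_mtx) != 0) {
 *		mtx_unlock(&sc->sc_mtx);
 *		mtx_lock(&sc->sc_mtx);
 *		... revalidate state ...
 *	}
 */
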
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared-lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

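/*
 * Example (illustrative sketch): asserting ownership at the top of a
 * function whose caller must hold the exclusive lock.
 *
 *	static void
 *	my_softc_modify(struct my_softc *sc)
 *	{
 *		KKASSERT(mtx_owned(&sc->sc_mtx));
 *		...
 *	}
 */
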
/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}

/*
 * The lock must be held and will be released on return.  Returns the
 * state, which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
	mtx_state_t state;

	state = (mtx->mtx_lock & MTX_EXCLUSIVE);
	mtx_unlock(mtx);

	return state;
}

/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
	if (state & MTX_EXCLUSIVE)
		mtx_lock_ex_quick(mtx);
	else
		mtx_lock_sh_quick(mtx);
}

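/*
 * Example (illustrative sketch): temporarily dropping the lock around
 * an operation that may block, then restoring whichever state (shared
 * or exclusive) was previously held.  Protected state must be
 * revalidated after the restore.
 *
 *	mtx_state_t state;
 *
 *	state = mtx_lock_temp_release(&sc->sc_mtx);
 *	my_blocking_operation(sc);
 *	mtx_lock_temp_restore(&sc->sc_mtx, state);
 *	... revalidate state ...
 */
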
#endif