/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/sys/spinlock2.h,v 1.2 2005/11/08 22:40:00 dillon Exp $
 */
#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#include <sys/thread2.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
/*
 * Adjust the per-thread spinlock bookkeeping count by 'count'
 * (+1 on acquire, -1 on release) so debug kernels can sanity-check
 * spinlock nesting.
 *
 * NOTE(review): the INVARIANTS guard and surrounding braces were
 * reconstructed — td_spinlocks accounting is debug-only state; confirm
 * against the original header.
 */
static __inline void
spin_lock_debug(int count)
{
#ifdef INVARIANTS
	curthread->td_spinlocks += count;
#endif
}
52 static __inline boolean_t
53 spin_trylock(struct spinlock *mtx)
55 if (atomic_swap_int(&mtx->lock, 1) == 0) {
62 extern void spin_lock_contested(struct spinlock *mtx);
65 * The quick versions should be used only if you are already
66 * in a critical section or you know the spinlock will never
67 * be used by an hard interrupt or soft interrupt.
70 spin_lock_quick(struct spinlock *mtx)
73 if (atomic_swap_int(&mtx->lock, 1) != 0)
74 spin_lock_contested(mtx); /* slow path */
78 spin_unlock_quick(struct spinlock *mtx)
82 mtx->lock = 0; /* non-bus-locked lock release */
85 static __inline boolean_t
86 spin_is_locked(struct spinlock *mtx)
92 spin_init(struct spinlock *mtx)
99 static __inline boolean_t
100 spin_trylock(struct spinlock *mtx)
105 static __inline boolean_t
106 spin_is_locked(struct spinlock *mtx)
/*
 * NOTE(review): these appear to be the non-SMP stubs — spinlock
 * operations compile away entirely on uniprocessor builds.
 */
static __inline void spin_lock(struct spinlock *mtx) { }
static __inline void spin_unlock(struct spinlock *mtx) { }
static __inline void spin_init(struct spinlock *mtx) { }
/*
 * The normal spin_lock() API automatically enters and exits a
 * critical section, preventing deadlocks from interrupt preemption
 * if the interrupt thread accesses the same spinlock.
 *
 * NOTE(review): the critical-section entry was reconstructed from the
 * comment above — confirm against the original header.
 */
static __inline void
spin_lock(struct spinlock *mtx)
{
	crit_enter();
	spin_lock_quick(mtx);
}
/*
 * Release a spinlock obtained via spin_lock() and exit the critical
 * section that was entered when it was acquired.
 *
 * NOTE(review): the critical-section exit was reconstructed to mirror
 * spin_lock() — confirm against the original header.
 */
static __inline void
spin_unlock(struct spinlock *mtx)
{
	spin_unlock_quick(mtx);
	crit_exit();
}