[dragonfly.git] / sys / sys / spinlock2.h
/*
 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/sys/spinlock2.h,v 1.6 2006/05/18 16:25:20 dillon Exp $
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#include <sys/thread2.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

#ifdef SMP

#ifdef INVARIANTS

static __inline void
spin_lock_debug(thread_t td, int count)
{
	td->td_spinlocks += count;
}

#endif

/*
 * Attempt to obtain a spinlock on behalf of the specified thread. Returns
 * FALSE on failure, TRUE on success.
 */
static __inline boolean_t
spin_trylock(thread_t td, struct spinlock *mtx)
{
	if (atomic_swap_int(&mtx->lock, 1) == 0) {
#ifdef INVARIANTS
		spin_lock_debug(td, 1);
#endif
		return (TRUE);
	}
	return (FALSE);
}

/*
 * Release a spinlock obtained via spin_trylock() on behalf of the specified
 * thread. This function always succeeds. It exists because the other
 * standard release functions only operate on the current thread.
 */
static __inline void
spin_tryunlock(thread_t td, struct spinlock *mtx)
{
#ifdef INVARIANTS
	if (td->td_spinlocks <= 0)
		panic("spin_tryunlock: wasn't locked!");
	spin_lock_debug(td, -1);
#endif
	cpu_sfence();
	mtx->lock = 0;		/* non-bus-locked lock release */
}
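
/*
 * Usage sketch: take and release a spinlock on behalf of a specific thread
 * with spin_trylock()/spin_tryunlock().  This block is illustrative only and
 * compiled out; 'hypo_queue', 'hq_*' and 'hypo_queue_bump' are hypothetical
 * names, not part of this API.
 */
#if 0
struct hypo_queue {
	struct spinlock	hq_spin;
	int		hq_count;
};

static void
hypo_queue_bump(thread_t td, struct hypo_queue *hq)
{
	/* Back off silently if another cpu currently holds the lock. */
	if (spin_trylock(td, &hq->hq_spin)) {
		++hq->hq_count;
		spin_tryunlock(td, &hq->hq_spin);
	}
}
#endif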

extern void spin_lock_contested(struct spinlock *mtx);

/*
 * The quick versions should be used only if you are already
 * in a critical section or you know the spinlock will never
 * be used by a hard interrupt, IPI, or soft interrupt.
 *
 * Obtain a spinlock and return.
 */
static __inline void
spin_lock_quick(struct spinlock *mtx)
{
#ifdef INVARIANTS
	spin_lock_debug(curthread, 1);
#endif
	if (atomic_swap_int(&mtx->lock, 1) != 0)
		spin_lock_contested(mtx);	/* slow path */
}

/*
 * Release a spinlock previously obtained by the current thread.
 */
static __inline void
spin_unlock_quick(struct spinlock *mtx)
{
#ifdef INVARIANTS
	if (curthread->td_spinlocks <= 0)
		panic("spin_unlock_quick: wasn't locked!");
	spin_lock_debug(curthread, -1);
#endif
	cpu_sfence();
	mtx->lock = 0;		/* non-bus-locked lock release */
}
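
/*
 * Usage sketch: the quick versions skip the critical-section handling, so
 * the caller must already provide one, e.g. via crit_enter()/crit_exit().
 * This block is illustrative only and compiled out; 'hypo_spin',
 * 'hypo_counter' and 'hypo_counter_bump' are hypothetical names.
 */
#if 0
static struct spinlock hypo_spin;
static int hypo_counter;

static void
hypo_counter_bump(void)
{
	crit_enter();			/* supply the critical section */
	spin_lock_quick(&hypo_spin);
	++hypo_counter;
	spin_unlock_quick(&hypo_spin);
	crit_exit();
}
#endif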

/*
 * Returns whether a spinlock is locked or not. 0 indicates not locked,
 * non-zero indicates locked (by any thread, not necessarily the current
 * thread).
 */
static __inline boolean_t
spin_is_locked(struct spinlock *mtx)
{
	return (mtx->lock);
}

static __inline void
spin_init(struct spinlock *mtx)
{
	mtx->lock = 0;
}

static __inline void
spin_uninit(struct spinlock *mtx)
{
	/* unused */
}

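/*
 * Usage sketch: a spinlock is typically embedded in the structure it
 * protects, set up once with spin_init() and torn down with spin_uninit();
 * spin_is_locked() is mainly useful for sanity checks.  This block is
 * illustrative only and compiled out; 'hypo_table', 'ht_*' and the two
 * functions are hypothetical names.
 */
#if 0
struct hypo_table {
	struct spinlock	ht_spin;
	int		ht_nentries;
};

static void
hypo_table_setup(struct hypo_table *ht)
{
	spin_init(&ht->ht_spin);
	ht->ht_nentries = 0;
}

static void
hypo_table_teardown(struct hypo_table *ht)
{
	if (spin_is_locked(&ht->ht_spin))
		panic("hypo_table_teardown: spinlock still held");
	spin_uninit(&ht->ht_spin);
}
#endif
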
#else	/* SMP */

static __inline boolean_t
spin_trylock(thread_t td, struct spinlock *mtx)
{
	return (TRUE);
}

static __inline void
spin_tryunlock(thread_t td, struct spinlock *mtx)
{
}

35a832df
MD
157static __inline boolean_t
158spin_is_locked(struct spinlock *mtx)
159{
160 return (FALSE);
161}
162
a2323cfa
YT
163static __inline void spin_lock_quick(struct spinlock *mtx) { }
164static __inline void spin_unlock_quick(struct spinlock *mtx) { }
35a832df
MD
165static __inline void spin_init(struct spinlock *mtx) { }
166
167#endif /* SMP */
168
/*
 * The normal spin_lock() API automatically enters and exits a
 * critical section, preventing deadlocks from interrupt preemption
 * if the interrupt thread accesses the same spinlock.
 */
static __inline void
spin_lock(struct spinlock *mtx)
{
	crit_enter_id("spin");
	spin_lock_quick(mtx);
}

static __inline void
spin_unlock(struct spinlock *mtx)
{
	spin_unlock_quick(mtx);
	crit_exit_id("spin");
}
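
/*
 * Usage sketch: the normal API supplies its own critical section, so a
 * short protected update needs only the lock/unlock pair.  This block is
 * illustrative only and compiled out; 'hypo_stats', 'hypo_stats_spin' and
 * 'hypo_stats_add' are hypothetical names.
 */
#if 0
static struct spinlock hypo_stats_spin;
static long hypo_stats[4];

static void
hypo_stats_add(int idx, long delta)
{
	spin_lock(&hypo_stats_spin);	/* also enters a critical section */
	hypo_stats[idx] += delta;
	spin_unlock(&hypo_stats_spin);	/* also exits the critical section */
}
#endif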

#endif