/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/sys/spinlock2.h,v 1.5 2006/04/22 22:19:19 dillon Exp $
 */

35#ifndef _SYS_SPINLOCK2_H_
36#define _SYS_SPINLOCK2_H_
37
38#include <sys/thread2.h>
39#include <machine/atomic.h>
40#include <machine/cpufunc.h>
41
42#ifdef SMP
43
/*
 * Maintain the per-thread count of spinlocks held when INVARIANTS is
 * enabled; compiles away to nothing otherwise.  Callers pass a positive
 * delta when acquiring and a negative delta when releasing.
 */
static __inline void
spin_lock_debug(int count)
{
#ifdef INVARIANTS
	curthread->td_spinlocks += count;
#endif
}

35a832df
MD
52static __inline boolean_t
53spin_trylock(struct spinlock *mtx)
54{
69d78e99
MD
55 if (atomic_swap_int(&mtx->lock, 1) == 0) {
56 spin_lock_debug(1);
35a832df 57 return (TRUE);
69d78e99 58 }
35a832df
MD
59 return (FALSE);
60}
61
62extern void spin_lock_contested(struct spinlock *mtx);
63
69d78e99
MD
64/*
65 * The quick versions should be used only if you are already
66 * in a critical section or you know the spinlock will never
67 * be used by an hard interrupt or soft interrupt.
68 */
35a832df 69static __inline void
69d78e99 70spin_lock_quick(struct spinlock *mtx)
35a832df 71{
69d78e99 72 spin_lock_debug(1);
35a832df
MD
73 if (atomic_swap_int(&mtx->lock, 1) != 0)
74 spin_lock_contested(mtx); /* slow path */
75}
76
77static __inline void
69d78e99 78spin_unlock_quick(struct spinlock *mtx)
35a832df 79{
69d78e99 80 spin_lock_debug(-1);
35a832df
MD
81 cpu_sfence();
82 mtx->lock = 0; /* non-bus-locked lock release */
83}
84
85static __inline boolean_t
86spin_is_locked(struct spinlock *mtx)
87{
88 return (mtx->lock);
89}
90
91static __inline void
92spin_init(struct spinlock *mtx)
93{
94 mtx->lock = 0;
95}
96
/*
 * Tear down a spinlock.  Nothing to do — spinlocks hold no resources —
 * but provided for API symmetry with spin_init().
 */
static __inline void
spin_uninit(struct spinlock *mtx)
{
	/* unused */
}

35a832df
MD
103#else /* SMP */
104
105static __inline boolean_t
106spin_trylock(struct spinlock *mtx)
107{
108 return (TRUE);
109}
110
111static __inline boolean_t
112spin_is_locked(struct spinlock *mtx)
113{
114 return (FALSE);
115}
116
a2323cfa
YT
117static __inline void spin_lock_quick(struct spinlock *mtx) { }
118static __inline void spin_unlock_quick(struct spinlock *mtx) { }
35a832df
MD
119static __inline void spin_init(struct spinlock *mtx) { }
120
121#endif /* SMP */
122
/*
 * The normal spin_lock() API automatically enters and exits a
 * critical section, preventing deadlocks from interrupt preemption
 * if the interrupt thread accesses the same spinlock.
 */
static __inline void
spin_lock(struct spinlock *mtx)
{
	crit_enter_id("spin");
	spin_lock_quick(mtx);
}

135static __inline void
69d78e99 136spin_unlock(struct spinlock *mtx)
35a832df 137{
69d78e99 138 spin_unlock_quick(mtx);
16523a43 139 crit_exit_id("spin");
35a832df
MD
140}
141
142#endif
143