/*
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2013-2020 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <asm/atomic.h>
#include <asm/barrier.h>

typedef struct {
        volatile u_int counter;
} atomic_t;

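/*
 * This mirrors the layout of Linux's atomic_t: a bare counter wrapped
 * in a struct, always manipulated through the accessors below.  The
 * 64-bit counterpart, atomic64_t, is assumed to be provided by
 * <asm/atomic.h>.
 */
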
#define atomic_add(i, v)                atomic_add_return((i), (v))
#define atomic_sub(i, v)                atomic_sub_return((i), (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_add_negative(i, v)       (atomic_add_return((i), (v)) < 0)
#define atomic_sub_and_test(i, v)       (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
#define atomic_dec_return(v)            atomic_sub_return(1, (v))

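/*
 * Example (a sketch, not compiled as part of this header): the usual
 * Linux refcounting idiom built from these macros; `struct obj' and
 * `obj_free()' are hypothetical.
 *
 *	struct obj {
 *		atomic_t refcount;
 *	};
 *
 *	static void
 *	obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcount))
 *			obj_free(o);
 *	}
 */
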
#define atomic64_add(i, v)              atomic_add_return_long((i), (v))
#define atomic64_sub(i, v)              atomic_sub_return_long((i), (v))

#define atomic_xchg(p, v)               atomic_swap_int(&((p)->counter), v)
#define atomic64_xchg(p, v)             atomic_swap_long(&((p)->counter), v)

#define atomic_cmpset(p, o, n)          atomic_cmpset_32(&((p)->counter), o, n)

#define atomic64_cmpxchg(p, o, n)                                       \
        (atomic_cmpset_long((volatile uint64_t *)(p), (o), (n)) ? (o) : (0))

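/*
 * Note: Linux's atomic64_cmpxchg() always returns the value that was
 * observed in *p.  The macro above instead returns `o' on success and
 * 0 on failure, which is indistinguishable from success when o == 0;
 * callers should only rely on it with a nonzero `o'.  Sketch of a
 * caller that stays within that limit:
 *
 *	if (atomic64_cmpxchg(&v, old, new) == old)
 *		...;	(the swap happened, assuming old != 0)
 */
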
static inline int
atomic_add_return(int i, atomic_t *v)
{
        return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int64_t
atomic_add_return_long(int64_t i, atomic64_t *v)
{
        return i + atomic_fetchadd_long(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
        return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline int64_t
atomic_sub_return_long(int64_t i, atomic64_t *v)
{
        return atomic_fetchadd_long(&v->counter, -i) - i;
}

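/*
 * Note the *_return semantics: the result is the value *after* the
 * operation, derived from the fetch-then-modify primitives above.
 * For example:
 *
 *	atomic_t a = { .counter = 5 };
 *
 *	atomic_add_return(3, &a);	returns 8 (a.counter == 8)
 *	atomic_sub_return(8, &a);	returns 0 (a.counter == 0)
 */
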
static inline void
atomic_set(atomic_t *v, int i)
{
        atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic64_set(atomic64_t *v, long i)
{
        atomic_store_rel_long(&v->counter, i);
}

static inline int
atomic_read(atomic_t *v)
{
        return atomic_load_acq_int(&v->counter);
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
        return atomic_load_acq_long(&v->counter);
}

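/*
 * atomic_set()/atomic64_set() are release stores and the reads are
 * acquire loads, so a reader that sees a value stored by atomic_set()
 * also sees every write the storing thread made before it.  Sketch
 * (hypothetical `data' and `ready'):
 *
 *	writer:	data = 42;
 *		atomic_set(&ready, 1);
 *
 *	reader:	if (atomic_read(&ready))
 *			use(data);	(guaranteed to see 42)
 */
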
static inline int
atomic_inc(atomic_t *v)
{
        return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
        return atomic_fetchadd_int(&v->counter, -1) - 1;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return atomic_cmpxchg_int(&v->counter, old, new);
}

static inline int atomic_add_unless(atomic_t *v, int add, int unless)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == unless))
                        break;
                old = atomic_cmpxchg_int(&v->counter, c, c + add);
                if (likely(old == c))
                        break;
                c = old;
        }
        return (c != unless);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

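/*
 * atomic_inc_not_zero() is the usual way to take a reference on an
 * object found via a lookup that can race with the final put.  Sketch
 * (`lookup_locked' is hypothetical):
 *
 *	o = lookup_locked(key);
 *	if (o != NULL && !atomic_inc_not_zero(&o->refcount))
 *		o = NULL;	(already on its way to destruction)
 */
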
/* atomic_clear_mask: atomically clear, in *addr, the bits set in mask */
#define atomic_clear_mask(mask, addr)           \
        /* atomic *addr &= ~mask; */            \
        __asm __volatile("lock andl %0, %1"     \
        :                                       \
        : "r" (~mask), "m" (*addr)              \
        : "memory");

#define smp_mb__before_atomic() cpu_ccfence()
#define smp_mb__after_atomic()  cpu_ccfence()

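/*
 * cpu_ccfence() is only a compiler barrier.  That is sufficient here
 * because on x86-64 every locked read-modify-write instruction already
 * acts as a full memory barrier; on architectures with weaker memory
 * ordering this mapping would need real fence instructions.
 */
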
static inline void
atomic_andnot(int i, atomic_t *v)
{
        /* v->counter = v->counter & ~i; */
        atomic_clear_int(&v->counter, i);
}

#define cmpxchg(ptr, old, new) ({                                       \
        __typeof(*(ptr)) __ret;                                         \
                                                                        \
        CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 ||           \
            sizeof(__ret) == 4 || sizeof(__ret) == 8);                  \
                                                                        \
        __ret = (old);                                                  \
        switch (sizeof(__ret)) {                                        \
        case 1:                                                         \
                while (!atomic_fcmpset_8((volatile int8_t *)(ptr),      \
                    (int8_t *)&__ret, (new)) && __ret == (old))         \
                        ;                                               \
                break;                                                  \
        case 2:                                                         \
                while (!atomic_fcmpset_16((volatile int16_t *)(ptr),    \
                    (int16_t *)&__ret, (new)) && __ret == (old))        \
                        ;                                               \
                break;                                                  \
        case 4:                                                         \
                while (!atomic_fcmpset_32((volatile int32_t *)(ptr),    \
                    (int32_t *)&__ret, (new)) && __ret == (old))        \
                        ;                                               \
                break;                                                  \
        case 8:                                                         \
                while (!atomic_fcmpset_64((volatile int64_t *)(ptr),    \
                    (int64_t *)&__ret, (new)) && __ret == (old))        \
                        ;                                               \
                break;                                                  \
        }                                                               \
        __ret;                                                          \
})

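/*
 * cmpxchg() has Linux semantics: it returns the value found in *ptr,
 * so success is detected by comparing the result with `old'.  Typical
 * lock-free update loop (a sketch; `transform' is hypothetical):
 *
 *	uint32_t cur, next;
 *
 *	do {
 *		cur = READ_ONCE(counter);
 *		next = transform(cur);
 *	} while (cmpxchg(&counter, cur, next) != cur);
 */
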
#define cmpxchg_relaxed(...)    cmpxchg(__VA_ARGS__)

#define atomic64_inc_return(p)  __sync_add_and_fetch_8(p, 1)

static inline void
atomic_set_release(atomic_t *v, int i)
{
        atomic_store_rel_int(&v->counter, i);
}

/* Returns the old value of v->counter */
static inline int
atomic_fetch_xor(int i, atomic_t *v)
{
        int val = READ_ONCE(v->counter);

        /*
         * atomic_cmpxchg_int() returns the value it found in the
         * counter; retry until it matches the value we expected,
         * i.e. until the XOR has actually been applied.
         */
        while (atomic_cmpxchg_int(&v->counter, val, val ^ i) != val) {
                val = READ_ONCE(v->counter);
        }

        return val;
}

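/*
 * Because atomic_fetch_xor() returns the previous value, it can toggle
 * a flag bit and report which side of the toggle was observed.  Sketch
 * (FLAG is a hypothetical bit mask):
 *
 *	if (atomic_fetch_xor(FLAG, &state) & FLAG)
 *		...;	(flag was set, we just cleared it)
 *	else
 *		...;	(flag was clear, we just set it)
 */
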
#include <asm-generic/atomic-long.h>

#endif  /* _LINUX_ATOMIC_H_ */