// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */
#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

// _GLIBCXX_BEGIN_NAMESPACE(std)
// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
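// (These macros are defined at configure time when the target provides
// the corresponding 1-, 2-, 4- and 8-byte __sync built-ins.)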
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }
    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }
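    // NB: __sync_lock_test_and_set is only an acquire barrier, which is
    // why test_and_set above issues a full __sync_synchronize() first
    // for the stronger orders.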
    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };
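  // NB: In clear(), __sync_lock_release already provides release
  // semantics; the trailing __sync_synchronize() conservatively turns
  // the remaining permitted orders into full barriers.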

  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;

    atomic_address(void* __v) { _M_i = __v; }
    bool
    is_lock_free() const volatile
    { return true; }
    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }
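    // NB: The store above relies on a plain aligned store being atomic
    // on the target; only the seq_cst case is followed by a full
    // __sync_synchronize() barrier.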
    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }
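    // NB: Bracketing the read between two full barriers is a
    // conservative way to give the load seq_cst semantics; the weaker
    // permitted orders are simply over-synchronized.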
    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }
    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }
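    // NB: Since the weak form forwards to compare_exchange_strong, it
    // never fails spuriously.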
    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }
    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }
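    // NB: __sync_val_compare_and_swap returns the contents of _M_i
    // before the operation, so on failure __v1 is updated with the
    // observed value, as the expected-value argument requires.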
    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }
    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_i, __d); }
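    // NB: _M_i is a void*, so the __sync built-ins above perform plain
    // byte arithmetic on the stored address.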
    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v) // XXX volatile
    {
      store(__v);
      return __v;
    }
    void*
    operator+=(ptrdiff_t __d) volatile
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return __sync_sub_and_fetch(&_M_i, __d); }
  };

  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __integral_type;

      __integral_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      // Requires __integral_type convertible to _M_base._M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i) // XXX volatile
      {
        store(__i);
        return __i;
      }
      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __integral_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }
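      // NB: The postfix forms above go through fetch_add/fetch_sub,
      // which return the old value; the prefix forms use the
      // __sync_*_and_fetch built-ins, which return the updated value.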
      __integral_type
      operator+=(__integral_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }
      bool
      is_lock_free() const volatile
      { return true; }
      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }
      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __integral_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }
      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }
      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }
      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }
      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __integral_type __i1o = __i1;
        __integral_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }
      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }
      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
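  // NB: The __sync fetch-and-op built-ins used above act as full
  // barriers, so the memory_order argument to the fetch_* members is
  // always satisfied, if over-conservatively.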

  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
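    // NB: Every member below simply delegates to the embedded
    // __atomic_base<bool>.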
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;

    atomic_bool(bool __i) : _M_base(__i) { }
    bool
    operator=(bool __i) // XXX volatile
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }
    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }
    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
} // namespace __atomic2

// _GLIBCXX_END_NAMESPACE

#endif