Merge branch 'vendor/GCC44'
[dragonfly.git] / contrib / gcc-4.4 / libstdc++-v3 / include / bits / atomic_0.h
// -*- C++ -*- header.

// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_0.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_0_H
#define _GLIBCXX_ATOMIC_0_H 1

#pragma GCC system_header

// _GLIBCXX_BEGIN_NAMESPACE(std)

  // 0 == __atomic0 == Never lock-free
namespace __atomic0
{
  struct atomic_flag;

  // Implementation specific defines.
#define _ATOMIC_LOAD_(__a, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     __typeof__ _ATOMIC_MEMBER_ __r = *__p; \
     atomic_flag_clear_explicit(__g, __x); \
     __r; })

#define _ATOMIC_STORE_(__a, __m, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     __typeof__(__m) __v = (__m); \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     *__p = __v; \
     atomic_flag_clear_explicit(__g, __x); \
     __v; })

#define _ATOMIC_MODIFY_(__a, __o, __m, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     __typeof__(__m) __v = (__m); \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     __typeof__ _ATOMIC_MEMBER_ __r = *__p; \
     *__p __o __v; \
     atomic_flag_clear_explicit(__g, __x); \
     __r; })

#define _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     __typeof__(__e) __q = (__e); \
     __typeof__(__m) __v = (__m); \
     bool __r; \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     __typeof__ _ATOMIC_MEMBER_ __t__ = *__p; \
     if (__t__ == *__q) { *__p = __v; __r = true; } \
     else { *__q = __t__; __r = false; } \
     atomic_flag_clear_explicit(__g, __x); \
     __r; })

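// Each macro above expands to a GNU statement expression that funnels a
// plain memory access through a per-address guard: __atomic_flag_for_address
// maps the object's address onto a shared pool of __atomic_flag_base locks,
// __atomic_flag_wait_explicit spins until that guard is acquired, and
// atomic_flag_clear_explicit releases it afterwards.  The hand-expanded
// sketch below is illustration only, not part of this header; it shows what
// _ATOMIC_LOAD_(__a, __x) amounts to when _ATOMIC_MEMBER_ names a guarded
// void* member, assuming the guard-flag helpers used by the macros are in
// scope.
inline void*
__load_expansion_sketch(void* volatile* __p, memory_order __x)
{
  volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p);
  __atomic_flag_wait_explicit(__g, __x);   // spin: acquire the guard flag
  void* __r = *__p;                        // plain load, now serialized
  atomic_flag_clear_explicit(__g, __x);    // release the guard flag
  return __r;
}
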
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile;

    void
    clear(memory_order __m = memory_order_seq_cst) volatile;
  };
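
// Usage sketch, illustration only (client code, not part of this internal
// header): test_and_set / clear form a spin lock, which is also how the
// guard flags above serialize the locked atomics.  Assumes the public
// C++0x interface (<cstdatomic> in GCC 4.4) that forwards to this header;
// all __example_* names are hypothetical.
std::atomic_flag __example_lock = ATOMIC_FLAG_INIT;
int __example_counter;                     // guarded by __example_lock

void
__example_increment()
{
  while (__example_lock.test_and_set(std::memory_order_acquire))
    { }                                    // spin until the holder clears it
  ++__example_counter;                     // critical section
  __example_lock.clear(std::memory_order_release);
}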

  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const volatile
    { return false; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v) // XXX volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return fetch_add(__d) + __d; }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return fetch_sub(__d) - __d; }
  };
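
// Usage sketch, illustration only: atomic_address as a bump-allocator
// cursor.  fetch_add returns the previous pointer value, so concurrent
// callers each claim a distinct region, with the atomicity provided by the
// guard flag rather than by hardware.  All __example_* names are
// hypothetical.
char __example_buffer[1024];
std::atomic_address __example_cursor(__example_buffer);

void*
__example_take(ptrdiff_t __n)
{
  // Old cursor value; the next caller starts __n bytes further along.
  return __example_cursor.fetch_add(__n);
}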

  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __integral_type;

      __integral_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      // Requires __integral_type convertible to _M_base._M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i) // XXX volatile
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __integral_type
      operator--() volatile
      { return fetch_sub(1) - 1; }

      __integral_type
      operator+=(__integral_type __i) volatile
      { return fetch_add(__i) + __i; }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return fetch_sub(__i) - __i; }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return fetch_and(__i) & __i; }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return fetch_or(__i) | __i; }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return fetch_xor(__i) ^ __i; }

      bool
      is_lock_free() const volatile
      { return false; }

      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
        _ATOMIC_STORE_(this, __i, __m);
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
        return _ATOMIC_LOAD_(this, __m);
      }

      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
    };
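
// Illustration only: the canonical compare-exchange retry loop, written
// against the public atomic_int (which wraps __atomic_base<int>).  On
// failure compare_exchange_weak writes the observed value back into the
// expected argument, so the loop re-reads for free.  In this locked
// implementation the weak and strong forms share one expansion
// (_ATOMIC_CMPEXCHNG_), so a weak exchange never fails spuriously, but
// portable code loops anyway.  The function name is hypothetical.
int
__example_saturating_increment(std::atomic_int& __c, int __limit)
{
  int __expected = __c.load();
  while (__expected < __limit
         && !__c.compare_exchange_weak(__expected, __expected + 1))
    { }                          // __expected was refreshed by the failure
  return __expected;             // value observed before the final attempt
}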

  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i) // XXX volatile
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }

    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
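
// Usage sketch, illustration only: atomic_bool as a run-once latch.  Only
// the caller whose compare_exchange_strong flips the value from false to
// true performs the work; every other caller sees true in __expected and
// skips.  All __example_* names are hypothetical.
std::atomic_bool __example_done(false);

void
__example_run_once(void (*__fn)())
{
  bool __expected = false;
  if (__example_done.compare_exchange_strong(__expected, true))
    __fn();                      // exactly one caller wins the exchange
}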

#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
#undef _ATOMIC_CMPEXCHNG_
} // namespace __atomic0

// _GLIBCXX_END_NAMESPACE

#endif