// Provenance: Update gcc-50 to SVN version 221572
// [dragonfly.git] contrib/gcc-5.0/libstdc++-v3/include/std/shared_mutex
1// <shared_mutex> -*- C++ -*-
2
3// Copyright (C) 2013-2015 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file include/shared_mutex
26 * This is a Standard C++ Library header.
27 */
28
29#ifndef _GLIBCXX_SHARED_MUTEX
30#define _GLIBCXX_SHARED_MUTEX 1
31
32#pragma GCC system_header
33
34#if __cplusplus <= 201103L
35# include <bits/c++14_warning.h>
36#else
37
38#include <bits/c++config.h>
39#include <mutex>
40#include <condition_variable>
41#include <bits/functexcept.h>
42
43namespace std _GLIBCXX_VISIBILITY(default)
44{
45_GLIBCXX_BEGIN_NAMESPACE_VERSION
46
47 /**
48 * @ingroup mutexes
49 * @{
50 */
51
52#ifdef _GLIBCXX_USE_C99_STDINT_TR1
53#ifdef _GLIBCXX_HAS_GTHREADS
54
55#define __cpp_lib_shared_timed_mutex 201402
56
57 /// shared_timed_mutex
58 class shared_timed_mutex
59 {
38c0c85b 60#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_T
dda118e3
JM
61 typedef chrono::system_clock __clock_t;
62
38c0c85b
JM
63#ifdef PTHREAD_RWLOCK_INITIALIZER
64 pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
65
66 public:
67 shared_timed_mutex() = default;
68 ~shared_timed_mutex() = default;
69#else
70 pthread_rwlock_t _M_rwlock;
dda118e3
JM
71
72 public:
73 shared_timed_mutex()
74 {
75 int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
76 if (__ret == ENOMEM)
77 throw bad_alloc();
78 else if (__ret == EAGAIN)
79 __throw_system_error(int(errc::resource_unavailable_try_again));
80 else if (__ret == EPERM)
81 __throw_system_error(int(errc::operation_not_permitted));
82 // Errors not handled: EBUSY, EINVAL
83 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
84 }
85
86 ~shared_timed_mutex()
87 {
88 int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
89 // Errors not handled: EBUSY, EINVAL
90 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
91 }
38c0c85b 92#endif
dda118e3
JM
93
94 shared_timed_mutex(const shared_timed_mutex&) = delete;
95 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
96
97 // Exclusive ownership
98
99 void
100 lock()
101 {
102 int __ret = pthread_rwlock_wrlock(&_M_rwlock);
103 if (__ret == EDEADLK)
104 __throw_system_error(int(errc::resource_deadlock_would_occur));
105 // Errors not handled: EINVAL
106 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
107 }
108
109 bool
110 try_lock()
111 {
112 int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
113 if (__ret == EBUSY) return false;
114 // Errors not handled: EINVAL
115 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
116 return true;
117 }
118
119#if _GTHREAD_USE_MUTEX_TIMEDLOCK
120 template<typename _Rep, typename _Period>
121 bool
122 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
123 {
124 return try_lock_until(__clock_t::now() + __rel_time);
125 }
126
127 template<typename _Duration>
128 bool
129 try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
130 {
131 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
132 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
133
134 __gthread_time_t __ts =
135 {
136 static_cast<std::time_t>(__s.time_since_epoch().count()),
137 static_cast<long>(__ns.count())
138 };
139
140 int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
141 // On self-deadlock, we just fail to acquire the lock. Technically,
142 // the program violated the precondition.
143 if (__ret == ETIMEDOUT || __ret == EDEADLK)
144 return false;
145 // Errors not handled: EINVAL
146 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
147 return true;
148 }
149
150 template<typename _Clock, typename _Duration>
151 bool
152 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
153 {
154 // DR 887 - Sync unknown clock to known clock.
155 const typename _Clock::time_point __c_entry = _Clock::now();
156 const __clock_t::time_point __s_entry = __clock_t::now();
157 const auto __delta = __abs_time - __c_entry;
158 const auto __s_atime = __s_entry + __delta;
159 return try_lock_until(__s_atime);
160 }
161#endif
162
163 void
164 unlock()
165 {
166 int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
167 // Errors not handled: EPERM, EBUSY, EINVAL
168 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
169 }
170
171 // Shared ownership
172
173 void
174 lock_shared()
175 {
38c0c85b
JM
176 int __ret;
177 // We retry if we exceeded the maximum number of read locks supported by
178 // the POSIX implementation; this can result in busy-waiting, but this
179 // is okay based on the current specification of forward progress
180 // guarantees by the standard.
181 do
182 __ret = pthread_rwlock_rdlock(&_M_rwlock);
183 while (__ret == EAGAIN);
dda118e3
JM
184 if (__ret == EDEADLK)
185 __throw_system_error(int(errc::resource_deadlock_would_occur));
dda118e3
JM
186 // Errors not handled: EINVAL
187 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
188 }
189
190 bool
191 try_lock_shared()
192 {
193 int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
194 // If the maximum number of read locks has been exceeded, we just fail
195 // to acquire the lock. Unlike for lock(), we are not allowed to throw
196 // an exception.
197 if (__ret == EBUSY || __ret == EAGAIN) return false;
198 // Errors not handled: EINVAL
199 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
200 return true;
201 }
202
203#if _GTHREAD_USE_MUTEX_TIMEDLOCK
204 template<typename _Rep, typename _Period>
205 bool
206 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
207 {
208 return try_lock_shared_until(__clock_t::now() + __rel_time);
209 }
210
211 template<typename _Duration>
212 bool
213 try_lock_shared_until(const chrono::time_point<__clock_t,
214 _Duration>& __atime)
215 {
216 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
217 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
218
219 __gthread_time_t __ts =
220 {
221 static_cast<std::time_t>(__s.time_since_epoch().count()),
222 static_cast<long>(__ns.count())
223 };
224
38c0c85b
JM
225 int __ret;
226 // Unlike for lock(), we are not allowed to throw an exception so if
227 // the maximum number of read locks has been exceeded, or we would
228 // deadlock, we just try to acquire the lock again (and will time out
229 // eventually).
230 // In cases where we would exceed the maximum number of read locks
231 // throughout the whole time until the timeout, we will fail to
232 // acquire the lock even if it would be logically free; however, this
233 // is allowed by the standard, and we made a "strong effort"
234 // (see C++14 30.4.1.4p26).
235 // For cases where the implementation detects a deadlock we
236 // intentionally block and timeout so that an early return isn't
237 // mistaken for a spurious failure, which might help users realise
238 // there is a deadlock.
239 do
240 __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
241 while (__ret == EAGAIN || __ret == EDEADLK);
242 if (__ret == ETIMEDOUT)
dda118e3
JM
243 return false;
244 // Errors not handled: EINVAL
245 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
246 return true;
247 }
248
249 template<typename _Clock, typename _Duration>
250 bool
251 try_lock_shared_until(const chrono::time_point<_Clock,
252 _Duration>& __abs_time)
253 {
254 // DR 887 - Sync unknown clock to known clock.
255 const typename _Clock::time_point __c_entry = _Clock::now();
256 const __clock_t::time_point __s_entry = __clock_t::now();
257 const auto __delta = __abs_time - __c_entry;
258 const auto __s_atime = __s_entry + __delta;
259 return try_lock_shared_until(__s_atime);
260 }
261#endif
262
263 void
264 unlock_shared()
265 {
266 unlock();
267 }
268
38c0c85b 269#else // ! _GLIBCXX_USE_PTHREAD_RWLOCK_T
dda118e3
JM
270
271#if _GTHREAD_USE_MUTEX_TIMEDLOCK
272 struct _Mutex : mutex, __timed_mutex_impl<_Mutex>
273 {
274 template<typename _Rep, typename _Period>
275 bool
276 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
277 { return _M_try_lock_for(__rtime); }
278
279 template<typename _Clock, typename _Duration>
280 bool
281 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
282 { return _M_try_lock_until(__atime); }
283 };
284#else
285 typedef mutex _Mutex;
286#endif
287
288 // Based on Howard Hinnant's reference implementation from N2406
289
290 _Mutex _M_mut;
291 condition_variable _M_gate1;
292 condition_variable _M_gate2;
293 unsigned _M_state;
294
295 static constexpr unsigned _S_write_entered
296 = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
297 static constexpr unsigned _M_n_readers = ~_S_write_entered;
298
299 public:
300 shared_timed_mutex() : _M_state(0) {}
301
302 ~shared_timed_mutex()
303 {
304 _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
305 }
306
307 shared_timed_mutex(const shared_timed_mutex&) = delete;
308 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
309
310 // Exclusive ownership
311
312 void
313 lock()
314 {
315 unique_lock<mutex> __lk(_M_mut);
316 while (_M_state & _S_write_entered)
317 _M_gate1.wait(__lk);
318 _M_state |= _S_write_entered;
319 while (_M_state & _M_n_readers)
320 _M_gate2.wait(__lk);
321 }
322
323 bool
324 try_lock()
325 {
326 unique_lock<mutex> __lk(_M_mut, try_to_lock);
327 if (__lk.owns_lock() && _M_state == 0)
328 {
329 _M_state = _S_write_entered;
330 return true;
331 }
332 return false;
333 }
334
335#if _GTHREAD_USE_MUTEX_TIMEDLOCK
336 template<typename _Rep, typename _Period>
337 bool
338 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
339 {
340 unique_lock<_Mutex> __lk(_M_mut, __rel_time);
341 if (__lk.owns_lock() && _M_state == 0)
342 {
343 _M_state = _S_write_entered;
344 return true;
345 }
346 return false;
347 }
348
349 template<typename _Clock, typename _Duration>
350 bool
351 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
352 {
353 unique_lock<_Mutex> __lk(_M_mut, __abs_time);
354 if (__lk.owns_lock() && _M_state == 0)
355 {
356 _M_state = _S_write_entered;
357 return true;
358 }
359 return false;
360 }
361#endif
362
363 void
364 unlock()
365 {
366 {
367 lock_guard<_Mutex> __lk(_M_mut);
368 _M_state = 0;
369 }
370 _M_gate1.notify_all();
371 }
372
373 // Shared ownership
374
375 void
376 lock_shared()
377 {
378 unique_lock<mutex> __lk(_M_mut);
379 while ((_M_state & _S_write_entered)
380 || (_M_state & _M_n_readers) == _M_n_readers)
381 {
382 _M_gate1.wait(__lk);
383 }
384 unsigned __num_readers = (_M_state & _M_n_readers) + 1;
385 _M_state &= ~_M_n_readers;
386 _M_state |= __num_readers;
387 }
388
389 bool
390 try_lock_shared()
391 {
392 unique_lock<_Mutex> __lk(_M_mut, try_to_lock);
393 unsigned __num_readers = _M_state & _M_n_readers;
394 if (__lk.owns_lock() && !(_M_state & _S_write_entered)
395 && __num_readers != _M_n_readers)
396 {
397 ++__num_readers;
398 _M_state &= ~_M_n_readers;
399 _M_state |= __num_readers;
400 return true;
401 }
402 return false;
403 }
404
405#if _GTHREAD_USE_MUTEX_TIMEDLOCK
406 template<typename _Rep, typename _Period>
407 bool
408 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
409 {
410 unique_lock<_Mutex> __lk(_M_mut, __rel_time);
411 if (__lk.owns_lock())
412 {
413 unsigned __num_readers = _M_state & _M_n_readers;
414 if (!(_M_state & _S_write_entered)
415 && __num_readers != _M_n_readers)
416 {
417 ++__num_readers;
418 _M_state &= ~_M_n_readers;
419 _M_state |= __num_readers;
420 return true;
421 }
422 }
423 return false;
424 }
425
426 template <typename _Clock, typename _Duration>
427 bool
428 try_lock_shared_until(const chrono::time_point<_Clock,
429 _Duration>& __abs_time)
430 {
431 unique_lock<_Mutex> __lk(_M_mut, __abs_time);
432 if (__lk.owns_lock())
433 {
434 unsigned __num_readers = _M_state & _M_n_readers;
435 if (!(_M_state & _S_write_entered)
436 && __num_readers != _M_n_readers)
437 {
438 ++__num_readers;
439 _M_state &= ~_M_n_readers;
440 _M_state |= __num_readers;
441 return true;
442 }
443 }
444 return false;
445 }
446#endif
447
448 void
449 unlock_shared()
450 {
451 lock_guard<_Mutex> __lk(_M_mut);
452 unsigned __num_readers = (_M_state & _M_n_readers) - 1;
453 _M_state &= ~_M_n_readers;
454 _M_state |= __num_readers;
455 if (_M_state & _S_write_entered)
456 {
457 if (__num_readers == 0)
458 _M_gate2.notify_one();
459 }
460 else
461 {
462 if (__num_readers == _M_n_readers - 1)
463 _M_gate1.notify_one();
464 }
465 }
38c0c85b 466#endif // ! _GLIBCXX_USE_PTHREAD_RWLOCK_T
dda118e3
JM
467 };
468#endif // _GLIBCXX_HAS_GTHREADS
469
470 /// shared_lock
471 template<typename _Mutex>
472 class shared_lock
473 {
474 public:
475 typedef _Mutex mutex_type;
476
477 // Shared locking
478
479 shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
480
481 explicit
482 shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
483 { __m.lock_shared(); }
484
485 shared_lock(mutex_type& __m, defer_lock_t) noexcept
486 : _M_pm(&__m), _M_owns(false) { }
487
488 shared_lock(mutex_type& __m, try_to_lock_t)
489 : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }
490
491 shared_lock(mutex_type& __m, adopt_lock_t)
492 : _M_pm(&__m), _M_owns(true) { }
493
494 template<typename _Clock, typename _Duration>
495 shared_lock(mutex_type& __m,
496 const chrono::time_point<_Clock, _Duration>& __abs_time)
497 : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }
498
499 template<typename _Rep, typename _Period>
500 shared_lock(mutex_type& __m,
501 const chrono::duration<_Rep, _Period>& __rel_time)
502 : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }
503
504 ~shared_lock()
505 {
506 if (_M_owns)
507 _M_pm->unlock_shared();
508 }
509
510 shared_lock(shared_lock const&) = delete;
511 shared_lock& operator=(shared_lock const&) = delete;
512
513 shared_lock(shared_lock&& __sl) noexcept : shared_lock()
514 { swap(__sl); }
515
516 shared_lock&
517 operator=(shared_lock&& __sl) noexcept
518 {
519 shared_lock(std::move(__sl)).swap(*this);
520 return *this;
521 }
522
523 void
524 lock()
525 {
526 _M_lockable();
527 _M_pm->lock_shared();
528 _M_owns = true;
529 }
530
531 bool
532 try_lock()
533 {
534 _M_lockable();
535 return _M_owns = _M_pm->try_lock_shared();
536 }
537
538 template<typename _Rep, typename _Period>
539 bool
540 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
541 {
542 _M_lockable();
543 return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
544 }
545
546 template<typename _Clock, typename _Duration>
547 bool
548 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
549 {
550 _M_lockable();
551 return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
552 }
553
554 void
555 unlock()
556 {
557 if (!_M_owns)
558 __throw_system_error(int(errc::resource_deadlock_would_occur));
559 _M_pm->unlock_shared();
560 _M_owns = false;
561 }
562
563 // Setters
564
565 void
566 swap(shared_lock& __u) noexcept
567 {
568 std::swap(_M_pm, __u._M_pm);
569 std::swap(_M_owns, __u._M_owns);
570 }
571
572 mutex_type*
573 release() noexcept
574 {
575 _M_owns = false;
576 return std::exchange(_M_pm, nullptr);
577 }
578
579 // Getters
580
581 bool owns_lock() const noexcept { return _M_owns; }
582
583 explicit operator bool() const noexcept { return _M_owns; }
584
585 mutex_type* mutex() const noexcept { return _M_pm; }
586
587 private:
588 void
589 _M_lockable() const
590 {
591 if (_M_pm == nullptr)
592 __throw_system_error(int(errc::operation_not_permitted));
593 if (_M_owns)
594 __throw_system_error(int(errc::resource_deadlock_would_occur));
595 }
596
597 mutex_type* _M_pm;
598 bool _M_owns;
599 };
600
601 /// Swap specialization for shared_lock
602 template<typename _Mutex>
603 void
604 swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
605 { __x.swap(__y); }
606
607#endif // _GLIBCXX_USE_C99_STDINT_TR1
608
609 // @} group mutexes
610_GLIBCXX_END_NAMESPACE_VERSION
611} // namespace
612
613#endif // C++14
614
615#endif // _GLIBCXX_SHARED_MUTEX