1 // <shared_mutex> -*- C++ -*-
3 // Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file include/shared_mutex
26 * This is a Standard C++ Library header.
29 #ifndef _GLIBCXX_SHARED_MUTEX
30 #define _GLIBCXX_SHARED_MUTEX 1
32 #pragma GCC system_header
34 #if __cplusplus <= 201103L
35 # include <bits/c++14_warning.h>
38 #include <bits/c++config.h>
40 #include <condition_variable>
41 #include <bits/functexcept.h>
43 namespace std _GLIBCXX_VISIBILITY(default)
45 _GLIBCXX_BEGIN_NAMESPACE_VERSION
52 #ifdef _GLIBCXX_USE_C99_STDINT_TR1
53 #ifdef _GLIBCXX_HAS_GTHREADS
55 #define __cpp_lib_shared_timed_mutex 201402
57 /// shared_timed_mutex
58 class shared_timed_mutex
// NOTE(review): this is a partial, line-numbered listing of libstdc++'s
// <shared_mutex>; the numeric prefixes and the gaps in them show that many
// structural lines (function signatures, braces, access specifiers) have
// been elided. Comments below describe the visible fragments only.
//
// First implementation branch: delegate everything to a POSIX
// pthread_rwlock_t. Readers map to rdlock, writers to wrlock.
60 #ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_T
// system_clock is used as the reference clock for the timed operations;
// the pthread_rwlock_timed* calls below take an absolute __gthread_time_t.
61 typedef chrono::system_clock __clock_t;
// When a static initializer exists the rwlock needs no runtime init/destroy
// and the special members can be defaulted.
63 #ifdef PTHREAD_RWLOCK_INITIALIZER
64 pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
67 shared_timed_mutex() = default;
68 ~shared_timed_mutex() = default;
// Otherwise (no PTHREAD_RWLOCK_INITIALIZER) the lock is initialized and
// destroyed explicitly in the constructor/destructor fragments below.
70 pthread_rwlock_t _M_rwlock;
// Constructor body fragment: map pthread_rwlock_init failure codes onto
// std::system_error via errc. (The ENOMEM branch presumably precedes the
// first visible `else if` on an elided line — TODO confirm.)
75 int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
78 else if (__ret == EAGAIN)
79 __throw_system_error(int(errc::resource_unavailable_try_again));
80 else if (__ret == EPERM)
81 __throw_system_error(int(errc::operation_not_permitted));
82 // Errors not handled: EBUSY, EINVAL
83 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// Destructor body fragment: destroying a still-held rwlock is a
// precondition violation, so errors are only debug-asserted, not thrown.
88 int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
89 // Errors not handled: EBUSY, EINVAL
90 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// Mutexes are neither copyable nor movable.
94 shared_timed_mutex(const shared_timed_mutex&) = delete;
95 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
97 // Exclusive ownership
// lock() body fragment: writer acquisition. Self-deadlock detected by the
// implementation is surfaced as resource_deadlock_would_occur.
102 int __ret = pthread_rwlock_wrlock(&_M_rwlock);
103 if (__ret == EDEADLK)
104 __throw_system_error(int(errc::resource_deadlock_would_occur));
105 // Errors not handled: EINVAL
106 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock() body fragment: EBUSY simply means the lock is held, so report
// failure rather than throwing.
112 int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
113 if (__ret == EBUSY) return false;
114 // Errors not handled: EINVAL
115 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
119 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
// try_lock_for: convert the relative timeout to an absolute deadline on
// __clock_t and forward to try_lock_until.
120 template<typename _Rep, typename _Period>
122 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
124 return try_lock_until(__clock_t::now() + __rel_time);
// try_lock_until (__clock_t overload): split the deadline into whole
// seconds plus nanoseconds to build the timespec-like __gthread_time_t.
127 template<typename _Duration>
129 try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
131 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
132 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
134 __gthread_time_t __ts =
136 static_cast<std::time_t>(__s.time_since_epoch().count()),
137 static_cast<long>(__ns.count())
140 int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
141 // On self-deadlock, we just fail to acquire the lock. Technically,
142 // the program violated the precondition.
143 if (__ret == ETIMEDOUT || __ret == EDEADLK)
145 // Errors not handled: EINVAL
146 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_until (generic clock overload): translate the caller's clock to
// __clock_t by measuring both "now"s and carrying the delta across.
150 template<typename _Clock, typename _Duration>
152 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
154 // DR 887 - Sync unknown clock to known clock.
155 const typename _Clock::time_point __c_entry = _Clock::now();
156 const __clock_t::time_point __s_entry = __clock_t::now();
157 const auto __delta = __abs_time - __c_entry;
158 const auto __s_atime = __s_entry + __delta;
159 return try_lock_until(__s_atime);
// unlock() body fragment: unlocking a lock we don't hold is a precondition
// violation, so again only a debug assert.
166 int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
167 // Errors not handled: EPERM, EBUSY, EINVAL
168 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// lock_shared() body fragment (Shared ownership): EAGAIN means the POSIX
// reader count overflowed, so spin until a slot frees up.
177 // We retry if we exceeded the maximum number of read locks supported by
178 // the POSIX implementation; this can result in busy-waiting, but this
179 // is okay based on the current specification of forward progress
180 // guarantees by the standard.
182 __ret = pthread_rwlock_rdlock(&_M_rwlock);
183 while (__ret == EAGAIN);
184 if (__ret == EDEADLK)
185 __throw_system_error(int(errc::resource_deadlock_would_occur));
186 // Errors not handled: EINVAL
187 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_shared() body fragment: both "busy" and "too many readers" are
// reported as a plain failure to acquire.
193 int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
194 // If the maximum number of read locks has been exceeded, we just fail
195 // to acquire the lock. Unlike for lock(), we are not allowed to throw
197 if (__ret == EBUSY || __ret == EAGAIN) return false;
198 // Errors not handled: EINVAL
199 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
203 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
// try_lock_shared_for: relative -> absolute deadline, as for the writer.
204 template<typename _Rep, typename _Period>
206 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
208 return try_lock_shared_until(__clock_t::now() + __rel_time);
// try_lock_shared_until (__clock_t overload): same timespec conversion as
// the exclusive variant, then a retry loop around timedrdlock.
211 template<typename _Duration>
213 try_lock_shared_until(const chrono::time_point<__clock_t,
216 auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
217 auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
219 __gthread_time_t __ts =
221 static_cast<std::time_t>(__s.time_since_epoch().count()),
222 static_cast<long>(__ns.count())
226 // Unlike for lock(), we are not allowed to throw an exception so if
227 // the maximum number of read locks has been exceeded, or we would
228 // deadlock, we just try to acquire the lock again (and will time out
230 // In cases where we would exceed the maximum number of read locks
231 // throughout the whole time until the timeout, we will fail to
232 // acquire the lock even if it would be logically free; however, this
233 // is allowed by the standard, and we made a "strong effort"
234 // (see C++14 30.4.1.4p26).
235 // For cases where the implementation detects a deadlock we
236 // intentionally block and timeout so that an early return isn't
237 // mistaken for a spurious failure, which might help users realise
238 // there is a deadlock.
240 __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
241 while (__ret == EAGAIN || __ret == EDEADLK);
242 if (__ret == ETIMEDOUT)
244 // Errors not handled: EINVAL
245 _GLIBCXX_DEBUG_ASSERT(__ret == 0);
// try_lock_shared_until (generic clock overload): DR 887 clock translation,
// then forward to the __clock_t overload above.
249 template<typename _Clock, typename _Duration>
251 try_lock_shared_until(const chrono::time_point<_Clock,
252 _Duration>& __abs_time)
254 // DR 887 - Sync unknown clock to known clock.
255 const typename _Clock::time_point __c_entry = _Clock::now();
256 const __clock_t::time_point __s_entry = __clock_t::now();
257 const auto __delta = __abs_time - __c_entry;
258 const auto __s_atime = __s_entry + __delta;
259 return try_lock_shared_until(__s_atime);
269 #else // ! _GLIBCXX_USE_PTHREAD_RWLOCK_T
// Second implementation branch: no pthread rwlock available, so build a
// reader/writer lock from one mutex, two condition variables and a packed
// state word. (Listing is partial — structural lines are elided.)
271 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
// _Mutex adapts std::mutex with timed-lock operations via the CRTP helper
// __timed_mutex_impl, so the timed members below can time out on _M_mut.
272 struct _Mutex : mutex, __timed_mutex_impl<_Mutex>
274 template<typename _Rep, typename _Period>
276 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
277 { return _M_try_lock_for(__rtime); }
279 template<typename _Clock, typename _Duration>
281 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
282 { return _M_try_lock_until(__atime); }
// Without timed-lock support a plain mutex suffices (no timed members).
285 typedef mutex _Mutex;
288 // Based on Howard Hinnant's reference implementation from N2406
// _M_gate1: waited on by new readers and by writers wanting to enter;
// _M_gate2: waited on by the writer while existing readers drain.
291 condition_variable _M_gate1;
292 condition_variable _M_gate2;
// State word layout: the most significant bit flags "a writer has entered";
// all remaining bits hold the current reader count.
295 static constexpr unsigned _S_write_entered
296 = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
// NOTE(review): despite the _M_ prefix this is a static constant (the mask
// of reader-count bits, also the maximum reader count) — _S_ would match
// the convention used by _S_write_entered.
297 static constexpr unsigned _M_n_readers = ~_S_write_entered;
300 shared_timed_mutex() : _M_state(0) {}
// Destroying a held shared_timed_mutex is undefined; assert it is free.
302 ~shared_timed_mutex()
304 _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
307 shared_timed_mutex(const shared_timed_mutex&) = delete;
308 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
310 // Exclusive ownership
// lock() fragment: first wait on gate1 until no other writer has entered,
// claim the writer bit, then wait on gate2 (presumably — the wait calls sit
// on elided lines) until the reader count reaches zero.
315 unique_lock<mutex> __lk(_M_mut);
316 while (_M_state & _S_write_entered)
318 _M_state |= _S_write_entered;
319 while (_M_state & _M_n_readers)
// try_lock() fragment: succeed only if the mutex is immediately available
// and there are neither readers nor a writer (_M_state == 0).
326 unique_lock<mutex> __lk(_M_mut, try_to_lock);
327 if (__lk.owns_lock() && _M_state == 0)
329 _M_state = _S_write_entered;
335 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
// Timed writer acquisition: try to take _M_mut within the deadline; only a
// completely free state can be claimed without blocking on the gates.
// NOTE(review): visible code claims the lock only when _M_state == 0; any
// gate-waiting with deadline would be on elided lines — can't confirm here.
336 template<typename _Rep, typename _Period>
338 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
340 unique_lock<_Mutex> __lk(_M_mut, __rel_time);
341 if (__lk.owns_lock() && _M_state == 0)
343 _M_state = _S_write_entered;
349 template<typename _Clock, typename _Duration>
351 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
353 unique_lock<_Mutex> __lk(_M_mut, __abs_time);
354 if (__lk.owns_lock() && _M_state == 0)
356 _M_state = _S_write_entered;
// unlock() fragment: clearing the state (elided) and waking everyone parked
// on gate1 (both readers and the next writer).
367 lock_guard<_Mutex> __lk(_M_mut);
370 _M_gate1.notify_all();
// lock_shared() fragment: wait on gate1 while a writer has entered or the
// reader count is saturated, then increment the count in the low bits.
378 unique_lock<mutex> __lk(_M_mut);
379 while ((_M_state & _S_write_entered)
380 || (_M_state & _M_n_readers) == _M_n_readers)
384 unsigned __num_readers = (_M_state & _M_n_readers) + 1;
385 _M_state &= ~_M_n_readers;
386 _M_state |= __num_readers;
// try_lock_shared() fragment: non-blocking variant of the same check;
// the increment of __num_readers sits on an elided line before the
// read-modify-write below.
392 unique_lock<_Mutex> __lk(_M_mut, try_to_lock);
393 unsigned __num_readers = _M_state & _M_n_readers;
394 if (__lk.owns_lock() && !(_M_state & _S_write_entered)
395 && __num_readers != _M_n_readers)
398 _M_state &= ~_M_n_readers;
399 _M_state |= __num_readers;
405 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
// Timed reader acquisition: bounded wait for _M_mut, then the same
// writer-free / not-saturated check as try_lock_shared().
406 template<typename _Rep, typename _Period>
408 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
410 unique_lock<_Mutex> __lk(_M_mut, __rel_time);
411 if (__lk.owns_lock())
413 unsigned __num_readers = _M_state & _M_n_readers;
414 if (!(_M_state & _S_write_entered)
415 && __num_readers != _M_n_readers)
418 _M_state &= ~_M_n_readers;
419 _M_state |= __num_readers;
426 template <typename _Clock, typename _Duration>
428 try_lock_shared_until(const chrono::time_point<_Clock,
429 _Duration>& __abs_time)
431 unique_lock<_Mutex> __lk(_M_mut, __abs_time);
432 if (__lk.owns_lock())
434 unsigned __num_readers = _M_state & _M_n_readers;
435 if (!(_M_state & _S_write_entered)
436 && __num_readers != _M_n_readers)
439 _M_state &= ~_M_n_readers;
440 _M_state |= __num_readers;
// unlock_shared() fragment: decrement the reader count; if a writer is
// waiting, wake it via gate2 once the last reader leaves, otherwise wake
// one reader blocked on a previously saturated count via gate1.
451 lock_guard<_Mutex> __lk(_M_mut);
452 unsigned __num_readers = (_M_state & _M_n_readers) - 1;
453 _M_state &= ~_M_n_readers;
454 _M_state |= __num_readers;
455 if (_M_state & _S_write_entered)
457 if (__num_readers == 0)
458 _M_gate2.notify_one();
462 if (__num_readers == _M_n_readers - 1)
463 _M_gate1.notify_one();
466 #endif // ! _GLIBCXX_USE_PTHREAD_RWLOCK_T
468 #endif // _GLIBCXX_HAS_GTHREADS
471 template<typename _Mutex>
// shared_lock<_Mutex>: RAII wrapper that manages SHARED ownership of a
// mutex, mirroring unique_lock for the lock_shared()/unlock_shared()
// interface. Invariant: _M_owns is true iff *this holds a shared lock on
// *_M_pm. (Listing is partial — signatures/braces are elided.)
475 typedef _Mutex mutex_type;
// Default: owns nothing, refers to no mutex.
479 shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
// Locking constructor: blocks in lock_shared().
482 shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
483 { __m.lock_shared(); }
// Tag-dispatched constructors: defer (don't lock), try (non-blocking),
// adopt (caller already holds a shared lock).
485 shared_lock(mutex_type& __m, defer_lock_t) noexcept
486 : _M_pm(&__m), _M_owns(false) { }
488 shared_lock(mutex_type& __m, try_to_lock_t)
489 : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }
491 shared_lock(mutex_type& __m, adopt_lock_t)
492 : _M_pm(&__m), _M_owns(true) { }
// Timed constructors: ownership reflects whether the timed acquire
// succeeded before the deadline / within the duration.
494 template<typename _Clock, typename _Duration>
495 shared_lock(mutex_type& __m,
496 const chrono::time_point<_Clock, _Duration>& __abs_time)
497 : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }
499 template<typename _Rep, typename _Period>
500 shared_lock(mutex_type& __m,
501 const chrono::duration<_Rep, _Period>& __rel_time)
502 : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }
// Destructor fragment: release the shared lock if owned.
507 _M_pm->unlock_shared();
// Move-only: copying a lock guard makes no sense.
510 shared_lock(shared_lock const&) = delete;
511 shared_lock& operator=(shared_lock const&) = delete;
// Move construction/assignment via the swap idiom: the moved-from object is
// left default-constructed (no mutex, no ownership).
513 shared_lock(shared_lock&& __sl) noexcept : shared_lock()
517 operator=(shared_lock&& __sl) noexcept
519 shared_lock(std::move(__sl)).swap(*this);
// lock()/try_lock()/timed variants forward to the shared operations on
// *_M_pm; _M_lockable() (see bottom fragment) presumably validates the
// state first on elided lines — TODO confirm.
527 _M_pm->lock_shared();
535 return _M_owns = _M_pm->try_lock_shared();
538 template<typename _Rep, typename _Period>
540 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
543 return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
546 template<typename _Clock, typename _Duration>
548 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
551 return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
// unlock() fragment: unlocking when not owned throws
// resource_deadlock_would_occur (matching unique_lock's behaviour).
558 __throw_system_error(int(errc::resource_deadlock_would_occur));
559 _M_pm->unlock_shared();
// swap/release: observers below never touch the mutex itself.
566 swap(shared_lock& __u) noexcept
568 std::swap(_M_pm, __u._M_pm);
569 std::swap(_M_owns, __u._M_owns);
// release() fragment: give up the association without unlocking; returns
// the old mutex pointer and leaves _M_pm null.
576 return std::exchange(_M_pm, nullptr);
581 bool owns_lock() const noexcept { return _M_owns; }
583 explicit operator bool() const noexcept { return _M_owns; }
585 mutex_type* mutex() const noexcept { return _M_pm; }
// _M_lockable() fragment: no mutex -> operation_not_permitted; already
// owning (presumably the elided condition) -> resource_deadlock_would_occur.
591 if (_M_pm == nullptr)
592 __throw_system_error(int(errc::operation_not_permitted));
594 __throw_system_error(int(errc::resource_deadlock_would_occur));
601 /// Swap specialization for shared_lock
// Free-function swap so ADL and std::swap pick the member-wise swap;
// the one-line body (presumably `{ __x.swap(__y); }`) is on an elided line.
602 template<typename _Mutex>
604 swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
607 #endif // _GLIBCXX_USE_C99_STDINT_TR1
610 _GLIBCXX_END_NAMESPACE_VERSION
615 #endif // _GLIBCXX_SHARED_MUTEX