  1. // <shared_mutex> -*- C++ -*-
  2. // Copyright (C) 2013-2015 Free Software Foundation, Inc.
  3. //
  4. // This file is part of the GNU ISO C++ Library. This library is free
  5. // software; you can redistribute it and/or modify it under the
  6. // terms of the GNU General Public License as published by the
  7. // Free Software Foundation; either version 3, or (at your option)
  8. // any later version.
  9. // This library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. // Under Section 7 of GPL version 3, you are granted additional
  14. // permissions described in the GCC Runtime Library Exception, version
  15. // 3.1, as published by the Free Software Foundation.
  16. // You should have received a copy of the GNU General Public License and
  17. // a copy of the GCC Runtime Library Exception along with this program;
  18. // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  19. // <http://www.gnu.org/licenses/>.
  20. /** @file include/shared_mutex
  21. * This is a Standard C++ Library header.
  22. */
  23. #ifndef _GLIBCXX_SHARED_MUTEX
  24. #define _GLIBCXX_SHARED_MUTEX 1
  25. #pragma GCC system_header
  26. #if __cplusplus <= 201103L
  27. # include <bits/c++14_warning.h>
  28. #else
  29. #include <bits/c++config.h>
  30. #include <mutex>
  31. #include <condition_variable>
  32. #include <bits/functexcept.h>
  33. namespace std _GLIBCXX_VISIBILITY(default)
  34. {
  35. _GLIBCXX_BEGIN_NAMESPACE_VERSION
  36. /**
  37. * @ingroup mutexes
  38. * @{
  39. */
  40. #ifdef _GLIBCXX_USE_C99_STDINT_TR1
  41. #ifdef _GLIBCXX_HAS_GTHREADS
  42. #define __cpp_lib_shared_timed_mutex 201402
  43. /// shared_timed_mutex
  class shared_timed_mutex
  {
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
    // Implementation based on POSIX pthread_rwlock_t, which directly
    // supports exclusive (write) and shared (read) locking, including
    // the timed variants.  The timed pthread operations interpret the
    // timespec against the realtime clock, hence system_clock here.
    typedef chrono::system_clock	__clock_t;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    // Static initialization is available, so construction and
    // destruction can be trivial.
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;
#else
    pthread_rwlock_t	_M_rwlock;

  public:
    shared_timed_mutex()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      // Map POSIX errors to the exceptions required by C++14 [thread.mutex].
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    ~shared_timed_mutex()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }
#endif

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	// Convert the relative timeout to an absolute time on the clock
	// understood by pthread_rwlock_timedwrlock.
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
	// Split the absolute time into the seconds/nanoseconds pair
	// expected by the POSIX timespec.
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_until(__s_atime);
      }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_shared_until(__s_atime);
      }

    void
    unlock_shared()
    {
      // A pthread rwlock has a single unlock operation for both writers
      // and readers.
      unlock();
    }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Must use the same clock as condition_variable
    typedef chrono::system_clock	__clock_t;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex		_M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable	_M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable	_M_gate2;
    // The write-entered flag and reader count.
    unsigned		_M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    shared_timed_mutex() : _M_state(0) {}

    ~shared_timed_mutex()
    {
      // No thread may hold any lock (shared or exclusive) at destruction.
      _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
    }

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      // Succeed only if no writer has entered and no readers are held.
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    // Timed out waiting for readers to drain: withdraw the
	    // write-entered flag so readers and writers can proceed.
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Blocks both while a writer is queued/active and while the reader
      // count is at its maximum.
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
  400. #endif // _GLIBCXX_HAS_GTHREADS
  401. /// shared_lock
  402. template<typename _Mutex>
  403. class shared_lock
  404. {
  405. public:
  406. typedef _Mutex mutex_type;
  407. // Shared locking
  408. shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
  409. explicit
  410. shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
  411. { __m.lock_shared(); }
  412. shared_lock(mutex_type& __m, defer_lock_t) noexcept
  413. : _M_pm(&__m), _M_owns(false) { }
  414. shared_lock(mutex_type& __m, try_to_lock_t)
  415. : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }
  416. shared_lock(mutex_type& __m, adopt_lock_t)
  417. : _M_pm(&__m), _M_owns(true) { }
  418. template<typename _Clock, typename _Duration>
  419. shared_lock(mutex_type& __m,
  420. const chrono::time_point<_Clock, _Duration>& __abs_time)
  421. : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }
  422. template<typename _Rep, typename _Period>
  423. shared_lock(mutex_type& __m,
  424. const chrono::duration<_Rep, _Period>& __rel_time)
  425. : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }
  426. ~shared_lock()
  427. {
  428. if (_M_owns)
  429. _M_pm->unlock_shared();
  430. }
  431. shared_lock(shared_lock const&) = delete;
  432. shared_lock& operator=(shared_lock const&) = delete;
  433. shared_lock(shared_lock&& __sl) noexcept : shared_lock()
  434. { swap(__sl); }
  435. shared_lock&
  436. operator=(shared_lock&& __sl) noexcept
  437. {
  438. shared_lock(std::move(__sl)).swap(*this);
  439. return *this;
  440. }
  441. void
  442. lock()
  443. {
  444. _M_lockable();
  445. _M_pm->lock_shared();
  446. _M_owns = true;
  447. }
  448. bool
  449. try_lock()
  450. {
  451. _M_lockable();
  452. return _M_owns = _M_pm->try_lock_shared();
  453. }
  454. template<typename _Rep, typename _Period>
  455. bool
  456. try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
  457. {
  458. _M_lockable();
  459. return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
  460. }
  461. template<typename _Clock, typename _Duration>
  462. bool
  463. try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
  464. {
  465. _M_lockable();
  466. return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
  467. }
  468. void
  469. unlock()
  470. {
  471. if (!_M_owns)
  472. __throw_system_error(int(errc::resource_deadlock_would_occur));
  473. _M_pm->unlock_shared();
  474. _M_owns = false;
  475. }
  476. // Setters
  477. void
  478. swap(shared_lock& __u) noexcept
  479. {
  480. std::swap(_M_pm, __u._M_pm);
  481. std::swap(_M_owns, __u._M_owns);
  482. }
  483. mutex_type*
  484. release() noexcept
  485. {
  486. _M_owns = false;
  487. return std::exchange(_M_pm, nullptr);
  488. }
  489. // Getters
  490. bool owns_lock() const noexcept { return _M_owns; }
  491. explicit operator bool() const noexcept { return _M_owns; }
  492. mutex_type* mutex() const noexcept { return _M_pm; }
  493. private:
  494. void
  495. _M_lockable() const
  496. {
  497. if (_M_pm == nullptr)
  498. __throw_system_error(int(errc::operation_not_permitted));
  499. if (_M_owns)
  500. __throw_system_error(int(errc::resource_deadlock_would_occur));
  501. }
  502. mutex_type* _M_pm;
  503. bool _M_owns;
  504. };
  /// Swap specialization for shared_lock
  /// Exchanges the mutex association and ownership state of @p __x and @p __y
  /// by forwarding to the member swap.
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
  510. #endif // _GLIBCXX_USE_C99_STDINT_TR1
  511. // @} group mutexes
  512. _GLIBCXX_END_NAMESPACE_VERSION
  513. } // namespace
  514. #endif // C++14
  515. #endif // _GLIBCXX_SHARED_MUTEX