// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t	_M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // Retry if the maximum number of read locks supported by the POSIX
      // implementation has been exceeded.  This can busy-wait, but that is
      // acceptable under the forward progress guarantees currently specified
      // by the standard.
      do
	__ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
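    //
    // For example, assuming a 32-bit unsigned, the layout is:
    //   bit 31    : write-entered flag (_S_write_entered)
    //   bits 0-30 : reader count, at most _S_max_readers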
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex		_M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable	_M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable	_M_gate2;
    // The write-entered flag and reader count.
    unsigned		_M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // Call notify_all() while the mutex is held so that another thread
      // cannot lock and unlock the mutex, then destroy *this, before we
      // make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
  };
#endif

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17
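
  // A minimal usage sketch (illustrative only; ThreadSafeCounter, get() and
  // increment() are hypothetical names, not part of this header; user code
  // also needs <mutex> for std::lock_guard):
  //
  //   class ThreadSafeCounter
  //   {
  //     mutable std::shared_mutex _mutex;
  //     unsigned _value = 0;
  //
  //   public:
  //     // Readers take shared ownership and may run concurrently.
  //     unsigned get() const
  //     {
  //       std::shared_lock<std::shared_mutex> lock(_mutex);
  //       return _value;
  //     }
  //
  //     // A writer takes exclusive ownership, excluding both readers and
  //     // other writers.
  //     void increment()
  //     {
  //       std::lock_guard<std::shared_mutex> lock(_mutex);
  //       ++_value;
  //     }
  //   };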

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif

  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
    typedef chrono::system_clock	__clock_t;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
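	// Measure how far in the future the deadline lies on the caller's
	// clock, then re-anchor that duration on __clock_t:
	//   __s_atime = __s_entry + (__abs_time - __c_entry)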
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_until(__s_atime);
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_shared_until(__s_atime);
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
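
  // A minimal timed-locking sketch (illustrative only; try_update and
  // shared_data are hypothetical names, not part of this header):
  //
  //   std::shared_timed_mutex m;
  //   int shared_data = 0;
  //
  //   bool try_update(int value)
  //   {
  //     // Give up if exclusive ownership cannot be obtained within 50ms.
  //     if (!m.try_lock_for(std::chrono::milliseconds(50)))
  //       return false;
  //     shared_data = value;
  //     m.unlock();
  //     return true;
  //   }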
#endif // _GLIBCXX_HAS_GTHREADS

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __abs_time)
      : _M_pm(std::__addressof(__m)),
	_M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rel_time)
      : _M_pm(std::__addressof(__m)),
	_M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	_M_owns = false;
	return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  __throw_system_error(int(errc::operation_not_permitted));
	if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type*	_M_pm;
      bool		_M_owns;
    };

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
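
  // A minimal sketch of scoped shared ownership (illustrative only; m,
  // shared_data and read_data are hypothetical names, not part of this
  // header):
  //
  //   std::shared_timed_mutex m;
  //   int shared_data = 0;
  //
  //   int read_data()
  //   {
  //     // Calls m.lock_shared() on construction and m.unlock_shared() on
  //     // destruction, so shared ownership is released even on exceptions.
  //     std::shared_lock<std::shared_timed_mutex> lock(m);
  //     return shared_data;
  //   }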

#endif // _GLIBCXX_USE_C99_STDINT_TR1

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX