Subversion Repositories Kolibri OS

Rev 6554 | Author: serge

//  -*- C++ -*-

// Copyright (C) 2013-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus <= 201103L
# include <bits/c++14_warning.h>
#else

#include <bits/c++config.h>
#include <mutex>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#define __cpp_lib_shared_timed_mutex 201402

  /// shared_timed_mutex
  class shared_timed_mutex
  {
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
    typedef chrono::system_clock	__clock_t;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;
#else
    pthread_rwlock_t	_M_rwlock;

  public:
    shared_timed_mutex()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    ~shared_timed_mutex()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }
#endif

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_until(__s_atime);
      }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_shared_until(__s_atime);
      }

    void
    unlock_shared()
    {
      unlock();
    }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Must use the same clock as condition_variable
    typedef chrono::system_clock	__clock_t;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.
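    //
    // For example, if two readers hold the lock when a writer arrives: the
    // writer passes gate1 (the write-entered flag is clear), sets the flag,
    // and blocks on gate2; any newly arriving reader now blocks on gate1;
    // when the last reader releases and drops the count to zero it signals
    // gate2, waking the writer.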

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex		_M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable	_M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable	_M_gate2;
    // The write-entered flag and reader count.
    unsigned		_M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;
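    // For example, with a 32-bit unsigned these evaluate to 0x80000000 and
    // 0x7fffffff respectively, so up to 2^31 - 1 reader locks can be counted.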

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    shared_timed_mutex() : _M_state(0) {}

    ~shared_timed_mutex()
    {
      _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
    }

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
						     _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS
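
  // Usage sketch (illustrative only; mtx, value, writer and reader are
  // hypothetical names, not part of this header):
  //
  //   std::shared_timed_mutex mtx;   // guards 'value'
  //   int value = 0;
  //
  //   void writer() {
  //     std::lock_guard<std::shared_timed_mutex> lk(mtx); // exclusive
  //     ++value;
  //   }
  //
  //   int reader() {
  //     mtx.lock_shared();           // shared: many readers may hold it
  //     int r = value;
  //     mtx.unlock_shared();
  //     return r;
  //   }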

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(&__m), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(&__m), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __abs_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rel_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	_M_owns = false;
	return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  __throw_system_error(int(errc::operation_not_permitted));
	if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type*	_M_pm;
      bool		_M_owns;
    };

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
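
  // Usage sketch for a timed shared acquire (illustrative only; names are
  // hypothetical):
  //
  //   std::shared_timed_mutex mtx;
  //   std::shared_lock<std::shared_timed_mutex> lk(mtx, std::defer_lock);
  //   if (lk.try_lock_for(std::chrono::milliseconds(10)))
  //     {
  //       // read the shared data; lk releases the shared lock on scope exit
  //     }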

#endif // _GLIBCXX_USE_C99_STDINT_TR1

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX