//  -*- C++ -*-

// Copyright (C) 2003-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */
#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <functional>
#include <system_error>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/move.h> // for std::swap
#ifdef _GLIBCXX_USE_C99_STDINT_TR1

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

#ifdef _GLIBCXX_HAS_GTHREADS
  // Common base class for std::mutex and std::timed_mutex
  class __mutex_base
  {
  protected:
    typedef __gthread_mutex_t			__native_type;

#ifdef __GTHREAD_MUTEX_INIT
    __native_type  _M_mutex = __GTHREAD_MUTEX_INIT;

    constexpr __mutex_base() noexcept = default;
#else
    __native_type  _M_mutex;

    __mutex_base() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
#endif

    __mutex_base(const __mutex_base&) = delete;
    __mutex_base& operator=(const __mutex_base&) = delete;
  };
  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };

  /**
   * @defgroup mutexes Mutexes
   * @ingroup concurrency
   *
   * Classes for mutex support.
   * @{
   */

  /// mutex
  class mutex : private __mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    constexpr
#endif
    mutex() noexcept = default;
    ~mutex() = default;

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
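
  // A minimal usage sketch (illustrative only, not part of this header):
  // serializing access to a shared counter.  The names __counter and
  // __increment are hypothetical.
  //
  //   std::mutex __mtx;
  //   int __counter = 0;
  //
  //   void __increment()
  //   {
  //     __mtx.lock();     // blocks until ownership is acquired
  //     ++__counter;      // critical section
  //     __mtx.unlock();   // every lock() needs a matching unlock()
  //   }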

  /// recursive_mutex
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
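
  // Illustrative sketch (not part of this header): unlike std::mutex, a
  // recursive_mutex may be re-locked by the thread that already owns it,
  // and must be unlocked once per lock.  __dump and __item are hypothetical.
  //
  //   std::recursive_mutex __rmtx;
  //
  //   void __item()
  //   { std::lock_guard<std::recursive_mutex> __l(__rmtx); /* ... */ }
  //
  //   void __dump()
  //   {
  //     std::lock_guard<std::recursive_mutex> __l(__rmtx);
  //     __item();   // OK: same thread locks __rmtx a second time
  //   }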

#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  /// timed_mutex
  class timed_mutex : private __mutex_base
  {
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock 	  	__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      bool
      _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;

	return _M_try_lock_until(__clock_t::now() + __rt);
      }

    template<typename _Duration>
      bool
      _M_try_lock_until(const chrono::time_point<__clock_t,
						 _Duration>& __atime)
      {
	chrono::time_point<__clock_t, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_mutex_timedlock(native_handle(), &__ts);
      }

    template<typename _Clock, typename _Duration>
      bool
      _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_for(__atime - _Clock::now()); }
  };
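
  // Illustrative sketch (not part of this header): bounded waiting with
  // try_lock_for.  The 100ms budget and __try_work are example inventions.
  //
  //   std::timed_mutex __tm;
  //
  //   bool __try_work()
  //   {
  //     if (!__tm.try_lock_for(std::chrono::milliseconds(100)))
  //       return false;           // gave up after roughly 100ms
  //     // ... owns the mutex here ...
  //     __tm.unlock();
  //     return true;
  //   }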

  /// recursive_timed_mutex
  class recursive_timed_mutex : private __recursive_mutex_base
  {
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock 		__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      bool
      _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;

	return _M_try_lock_until(__clock_t::now() + __rt);
      }

    template<typename _Duration>
      bool
      _M_try_lock_until(const chrono::time_point<__clock_t,
						 _Duration>& __atime)
      {
	chrono::time_point<__clock_t, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_mutex_timedlock(native_handle(), &__ts);
      }

    template<typename _Clock, typename _Duration>
      bool
      _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_for(__atime - _Clock::now()); }
  };
#endif
#endif // _GLIBCXX_HAS_GTHREADS

  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  constexpr defer_lock_t	defer_lock { };
  constexpr try_to_lock_t	try_to_lock { };
  constexpr adopt_lock_t	adopt_lock { };

  /// @brief  Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  template<typename _Mutex>
    class lock_guard
    {
    public:
      typedef _Mutex mutex_type;

      explicit lock_guard(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
      { } // calling thread owns mutex

      ~lock_guard()
      { _M_device.unlock(); }

      lock_guard(const lock_guard&) = delete;
      lock_guard& operator=(const lock_guard&) = delete;

    private:
      mutex_type&  _M_device;
    };
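
  // Illustrative sketch (not part of this header): the RAII form of manual
  // lock()/unlock(); the destructor unlocks even if the critical section
  // throws.  __counter and __increment are hypothetical.
  //
  //   std::mutex __mtx;
  //   int __counter = 0;
  //
  //   void __increment()
  //   {
  //     std::lock_guard<std::mutex> __guard(__mtx);  // locks in constructor
  //     ++__counter;                                 // critical section
  //   }                                              // unlocks in destructor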

  /// unique_lock
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(&__m), _M_owns(false)
      { }

      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
	{ }

      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      unique_lock& operator=(unique_lock&& __u) noexcept
      {
	if (_M_owns)
	  unlock();

	unique_lock(std::move(__u)).swap(*this);

	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      void
      swap(unique_lock& __u) noexcept
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      bool
      owns_lock() const noexcept
      { return _M_owns; }

      explicit operator bool() const noexcept
      { return owns_lock(); }

      mutex_type*
      mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type*	_M_device;
      bool		_M_owns; // XXX use atomic_bool
    };
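
  // Illustrative sketch (not part of this header): unique_lock adds
  // deferred, timed and movable ownership on top of lock_guard.  The names
  // __a, __b and __transfer are hypothetical.
  //
  //   std::mutex __a, __b;
  //
  //   void __transfer()
  //   {
  //     std::unique_lock<std::mutex> __la(__a, std::defer_lock);
  //     std::unique_lock<std::mutex> __lb(__b, std::defer_lock);
  //     std::lock(__la, __lb);     // lock both without deadlock
  //     // ... __la.owns_lock() and __lb.owns_lock() are true here ...
  //   }                            // destructors release in reverse order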

  /// Swap overload for unique_lock objects.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>& __locks)
	{
	  std::get<_Idx>(__locks).unlock();
	  __unlock_impl<_Idx - 1>::__do_unlock(__locks);
	}
    };

  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>&)
	{ }
    };

  template<typename _Lock>
    unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>(__l, try_to_lock); }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
                __do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
	}
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
	}
    };

  /** @brief Generic try_lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try
      { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
      { }
      return __idx;
    }
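
  // Illustrative sketch (not part of this header) of the return contract:
  // -1 means every argument was locked; otherwise the 0-based index of the
  // first failure is returned and nothing is left locked.
  //
  //   std::mutex __m1, __m2;
  //
  //   int __idx = std::try_lock(__m1, __m2);
  //   if (__idx == -1)
  //     {
  //       // ... both locked; the caller must unlock both ...
  //       __m2.unlock();
  //       __m1.unlock();
  //     }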

  /** @brief Generic lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception, any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
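
  // Illustrative sketch (not part of this header): lock() acquires several
  // mutexes without deadlock regardless of argument order, so two threads
  // naming the same mutexes in opposite order cannot wedge each other.
  // __thread_a and __thread_b are hypothetical.
  //
  //   std::mutex __m1, __m2;
  //
  //   void __thread_a()
  //   {
  //     std::lock(__m1, __m2);
  //     std::lock_guard<std::mutex> __g1(__m1, std::adopt_lock);
  //     std::lock_guard<std::mutex> __g2(__m2, std::adopt_lock);
  //     // ...
  //   }
  //   // __thread_b may safely call std::lock(__m2, __m1).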

#ifdef _GLIBCXX_HAS_GTHREADS
  /// once_flag
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy(void);

  /// call_once
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_functor = [&]() { __callable(); };
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
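
  // Illustrative sketch (not part of this header): lazy one-time
  // initialization; concurrent callers block until the selected call
  // completes.  __flag, __init and __worker are hypothetical.
  //
  //   std::once_flag __flag;
  //   void __init() { /* runs exactly once */ }
  //
  //   void __worker()
  //   {
  //     std::call_once(__flag, __init);
  //     // ... initialization is guaranteed complete here ...
  //   }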
#endif // _GLIBCXX_HAS_GTHREADS

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // _GLIBCXX_USE_C99_STDINT_TR1

#endif // C++11

#endif // _GLIBCXX_MUTEX