/*
 * Copyright (c) 1997-1999
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

// WARNING: This is an internal header file, included by other C++
// standard library headers.  You should not attempt to use this header
// file directly.
// stl_config.h should be included before this file.

#ifndef __SGI_STL_INTERNAL_THREADS_H
#define __SGI_STL_INTERNAL_THREADS_H

// Supported threading models are native SGI, pthreads, uithreads
// (similar to pthreads, but based on an earlier draft of the POSIX
// threads standard), and Win32 threads.  Uithread support by Jochen
// Schlick, 1999.

// GCC extension begin
// In order to present a stable threading configuration, in all cases,
// gcc looks for its own abstraction layer before all others.  All
// modifications to this file are marked to allow easier importation of
// STL upgrades.
#if defined(__STL_GTHREADS)
#include "bits/gthr.h"
#else
// GCC extension end
#if defined(__STL_SGI_THREADS)
#include <mutex.h>
#include <time.h>
#elif defined(__STL_PTHREADS)
#include <pthread.h>
#elif defined(__STL_UITHREADS)
#include <thread.h>
#include <synch.h>
#elif defined(__STL_WIN32THREADS)
#include <windows.h>
#endif
// GCC extension begin
#endif
// GCC extension end

namespace std
{

// Class _Refcount_Base provides a type, _RC_t, a data member,
// _M_ref_count, and member functions _M_incr and _M_decr, which perform
// atomic preincrement/predecrement.  The constructor initializes
// _M_ref_count.

// Hack for SGI o32 compilers.
#if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
#  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
#  define __test_and_set(__l,__v)  test_and_set(__l,__v)
#endif /* o32 */

struct _Refcount_Base
{
  // The type _RC_t
# ifdef __STL_WIN32THREADS
  typedef long _RC_t;
# else
  typedef size_t _RC_t;
#endif

  // The data member _M_ref_count
  volatile _RC_t _M_ref_count;

  // Constructor
// GCC extension begin
#ifdef __STL_GTHREADS
  __gthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    {
#ifdef __GTHREAD_MUTEX_INIT
      __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
      _M_ref_count_lock = __tmp;
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      __GTHREAD_MUTEX_INIT_FUNCTION (&_M_ref_count_lock);
#else
#error __GTHREAD_MUTEX_INIT or __GTHREAD_MUTEX_INIT_FUNCTION should be defined by gthr.h abstraction layer, report problem to libstdc++@gcc.gnu.org.
#endif
    }
#else
// GCC extension end
# ifdef __STL_PTHREADS
  pthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { pthread_mutex_init(&_M_ref_count_lock, 0); }
# elif defined(__STL_UITHREADS)
  mutex_t         _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
# else
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
# endif
// GCC extension begin
#endif
// GCC extension end

// GCC extension begin
#ifdef __STL_GTHREADS
  void _M_incr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
#else
// GCC extension end
  // _M_incr and _M_decr
# ifdef __STL_SGI_THREADS
  void _M_incr() { __add_and_fetch(&_M_ref_count, 1); }
  _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
# elif defined (__STL_WIN32THREADS)
  void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
  _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
# elif defined(__STL_PTHREADS)
  void _M_incr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# elif defined(__STL_UITHREADS)
  void _M_incr() {
    mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    mutex_lock(&_M_ref_count_lock);
    /*volatile*/ _RC_t __tmp = --_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# else  /* No threads */
  void _M_incr() { ++_M_ref_count; }
  _RC_t _M_decr() { return --_M_ref_count; }
# endif
// GCC extension begin
#endif
// GCC extension end
};

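// Illustrative usage sketch (not part of the original header): a
// reference-counted representation class, such as the old SGI rope or
// string rep, would derive from _Refcount_Base, call _M_incr() when a
// new owner shares the representation, and destroy it when _M_decr()
// returns zero.  The names below are hypothetical.
//
//   struct _My_shared_rep : public _Refcount_Base {
//     _My_shared_rep() : _Refcount_Base(1) {}  // start with one owner
//   };
//
//   void _Release(_My_shared_rep* __p) {
//     if (__p->_M_decr() == 0)  // last owner; safe to delete
//       delete __p;
//   }
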
// Atomic swap on unsigned long
// This is guaranteed to behave as though it were atomic only if all
// possibly concurrent updates use _Atomic_swap.
// In some cases the operation is emulated with a lock.
// GCC extension begin
#ifdef __STL_GTHREADS
// We don't provide an _Atomic_swap in this configuration.  This only
// affects the use of ext/rope with threads.  Someone could add this
// later, if required.  You can start by cloning the __STL_PTHREADS
// path while making the obvious changes.  Later it could be optimized
// to use the atomicity.h abstraction layer from libstdc++-v3.
#else
// GCC extension end
# ifdef __STL_SGI_THREADS
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
#       if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
            return test_and_set(__p, __q);
#       else
            return __test_and_set(__p, (unsigned long)__q);
#       endif
    }
# elif defined(__STL_WIN32THREADS)
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
    }
# elif defined(__STL_PTHREADS)
    // We use a template here only to get a unique initialized instance.
    template<int __dummy>
    struct _Swap_lock_struct {
        static pthread_mutex_t _S_swap_lock;
    };

    template<int __dummy>
    pthread_mutex_t
    _Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;

    // This should be portable, but performance is expected
    // to be quite awful.  This really needs platform specific
    // code.
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
        unsigned long __result = *__p;
        *__p = __q;
        pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
        return __result;
    }
# elif defined(__STL_UITHREADS)
    // We use a template here only to get a unique initialized instance.
    template<int __dummy>
    struct _Swap_lock_struct {
        static mutex_t _S_swap_lock;
    };

    template<int __dummy>
    mutex_t
    _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;

    // This should be portable, but performance is expected
    // to be quite awful.  This really needs platform specific
    // code.
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
        unsigned long __result = *__p;
        *__p = __q;
        mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
        return __result;
    }
# elif defined (__STL_SOLARIS_THREADS)
    // Any better solutions?
    // We use a template here only to get a unique initialized instance.
    template<int __dummy>
    struct _Swap_lock_struct {
        static mutex_t _S_swap_lock;
    };

# if ( __STL_STATIC_TEMPLATE_DATA > 0 )
    template<int __dummy>
    mutex_t
    _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
# else
    __DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock,
                       =DEFAULTMUTEX);
# endif /* ( __STL_STATIC_TEMPLATE_DATA > 0 ) */

    // This should be portable, but performance is expected
    // to be quite awful.  This really needs platform specific
    // code.
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
        unsigned long __result = *__p;
        *__p = __q;
        mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
        return __result;
    }
# else
    static inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        unsigned long __result = *__p;
        *__p = __q;
        return __result;
    }
# endif
// GCC extension begin
#endif
// GCC extension end

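// Illustrative usage sketch (not part of the original header): because
// _Atomic_swap returns the previous value, it can implement a simple
// once-only flag, provided every concurrent update of the flag also goes
// through _Atomic_swap.  The names below are hypothetical.
//
//   unsigned long _S_init_flag = 0;
//
//   void _Init_once() {
//     if (_Atomic_swap(&_S_init_flag, 1) == 0) {
//       // Exactly one thread observes the old value 0 and gets here.
//     }
//   }
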
// Locking class.  Note that this class *does not have a constructor*.
// It must be initialized either statically, with __STL_MUTEX_INITIALIZER,
// or dynamically, by explicitly calling the _M_initialize member function.
// (This is similar to the ways that a pthreads mutex can be initialized.)
// There are explicit member functions for acquiring and releasing the lock.

// There is no constructor because static initialization is essential for
// some uses, and only a class aggregate (see section 8.5.1 of the C++
// standard) can be initialized that way.  That means we must have no
// constructors, no base classes, no virtual functions, and no private or
// protected members.

// Helper struct.  This is a workaround for various compilers that don't
// handle static variables in inline functions properly.
template <int __inst>
struct _STL_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.

  static unsigned __max;
  static unsigned __last;
};

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__max = _STL_mutex_spin<__inst>::__low_max;

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__last = 0;

// GCC extension begin
#if defined(__STL_GTHREADS)
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
extern __gthread_mutex_t _GLIBCPP_mutex;
extern __gthread_mutex_t *_GLIBCPP_mutex_address;
extern __gthread_once_t _GLIBCPP_once;
extern void _GLIBCPP_mutex_init (void);
extern void _GLIBCPP_mutex_address_init (void);
#endif
#endif
// GCC extension end

struct _STL_mutex_lock
{
// GCC extension begin
#if defined(__STL_GTHREADS)
  // The class must be statically initialized with __STL_MUTEX_INITIALIZER.
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
  volatile int _M_init_flag;
  __gthread_once_t _M_once;
#endif
  __gthread_mutex_t _M_lock;
  void _M_initialize() {
#ifdef __GTHREAD_MUTEX_INIT
    // There should be no code in this path given the usage rules above.
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (_M_init_flag) return;
    if (__gthread_once (&_GLIBCPP_once, _GLIBCPP_mutex_init) != 0
        && __gthread_active_p ())
      abort ();
    __gthread_mutex_lock (&_GLIBCPP_mutex);
    if (!_M_init_flag) {
        // Even though we have a global lock, we use __gthread_once to be
        // absolutely certain the _M_lock mutex is only initialized once on
        // multiprocessor systems.
        _GLIBCPP_mutex_address = &_M_lock;
        if (__gthread_once (&_M_once, _GLIBCPP_mutex_address_init) != 0
            && __gthread_active_p ())
          abort ();
        _M_init_flag = 1;
    }
    __gthread_mutex_unlock (&_GLIBCPP_mutex);
#endif
  }
  void _M_acquire_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_lock(&_M_lock);
  }
  void _M_release_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_unlock(&_M_lock);
  }
#else
// GCC extension end
#if defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
  // It should be relatively easy to get this to work on any modern Unix.
  volatile unsigned long _M_lock;
  void _M_initialize() { _M_lock = 0; }
  static void _S_nsec_sleep(int __log_nsec) {
#     ifdef __STL_SGI_THREADS
          struct timespec __ts;
          /* Max sleep is 2**27 nsec ~ 134 msec */
          __ts.tv_sec = 0;
          __ts.tv_nsec = 1L << __log_nsec;
          nanosleep(&__ts, 0);
#     elif defined(__STL_WIN32THREADS)
          if (__log_nsec <= 20) {
              Sleep(0);
          } else {
              Sleep(1 << (__log_nsec - 20));
          }
#     else
#       error unimplemented
#     endif
  }
  void _M_acquire_lock() {
    volatile unsigned long* __lock = &this->_M_lock;

    if (!_Atomic_swap((unsigned long*)__lock, 1)) {
      return;
    }
    unsigned __my_spin_max = _STL_mutex_spin<0>::__max;
    unsigned __my_last_spins = _STL_mutex_spin<0>::__last;
    volatile unsigned __junk = 17;      // Value doesn't matter.
    unsigned __i;
    for (__i = 0; __i < __my_spin_max; __i++) {
      if (__i < __my_last_spins/2 || *__lock) {
        __junk *= __junk; __junk *= __junk;
        __junk *= __junk; __junk *= __junk;
        continue;
      }
      if (!_Atomic_swap((unsigned long*)__lock, 1)) {
        // got it!
        // Spinning worked.  Thus we're probably not being scheduled
        // against the other process with which we were contending.
        // Thus it makes sense to spin longer the next time.
        _STL_mutex_spin<0>::__last = __i;
        _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__high_max;
        return;
      }
    }
    // We are probably being scheduled against the other process.  Sleep.
    _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__low_max;
    for (__i = 0 ;; ++__i) {
      int __log_nsec = __i + 6;

      if (__log_nsec > 27) __log_nsec = 27;
      if (!_Atomic_swap((unsigned long *)__lock, 1)) {
        return;
      }
      _S_nsec_sleep(__log_nsec);
    }
  }
  void _M_release_lock() {
    volatile unsigned long* __lock = &_M_lock;
#   if defined(__STL_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
        asm("sync");
        *__lock = 0;
#   elif defined(__STL_SGI_THREADS) && __mips >= 3 \
         && (defined (_ABIN32) || defined(_ABI64))
        __lock_release(__lock);
#   else
        *__lock = 0;
        // This is not sufficient on many multiprocessors, since
        // writes to protected variables and the lock may be reordered.
#   endif
  }

// We no longer use win32 critical sections.
// They appear to be slower in the contention-free case,
// and they appear difficult to initialize without introducing a race.

#elif defined(__STL_PTHREADS)
  pthread_mutex_t _M_lock;
  void _M_initialize()   { pthread_mutex_init(&_M_lock, NULL); }
  void _M_acquire_lock() { pthread_mutex_lock(&_M_lock); }
  void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#elif defined(__STL_UITHREADS)
  mutex_t _M_lock;
  void _M_initialize()   { mutex_init(&_M_lock, USYNC_THREAD, 0); }
  void _M_acquire_lock() { mutex_lock(&_M_lock); }
  void _M_release_lock() { mutex_unlock(&_M_lock); }
#else /* No threads */
  void _M_initialize()   {}
  void _M_acquire_lock() {}
  void _M_release_lock() {}
#endif
// GCC extension begin
#endif
// GCC extension end
};

// GCC extension begin
#if defined(__STL_GTHREADS)
#ifdef __GTHREAD_MUTEX_INIT
#define __STL_MUTEX_INITIALIZER = { __GTHREAD_MUTEX_INIT }
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
#ifdef __GTHREAD_MUTEX_INIT_DEFAULT
#define __STL_MUTEX_INITIALIZER \
  = { 0, __GTHREAD_ONCE_INIT, __GTHREAD_MUTEX_INIT_DEFAULT }
#else
#define __STL_MUTEX_INITIALIZER = { 0, __GTHREAD_ONCE_INIT }
#endif
#endif
#else
// GCC extension end
#ifdef __STL_PTHREADS
// Pthreads locks must be statically initialized to something other than
// the default value of zero.
#   define __STL_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
#elif defined(__STL_UITHREADS)
// UIthreads locks must be statically initialized to something other than
// the default value of zero.
#   define __STL_MUTEX_INITIALIZER = { DEFAULTMUTEX }
#elif defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
#   define __STL_MUTEX_INITIALIZER = { 0 }
#else
#   define __STL_MUTEX_INITIALIZER
#endif
// GCC extension begin
#endif
// GCC extension end

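// Illustrative usage sketch (not part of the original header): a
// statically initialized lock guarding a hypothetical global counter.
// Note that __STL_MUTEX_INITIALIZER expands to include the leading '=',
// so no '=' is written at the definition.
//
//   static _STL_mutex_lock _S_counter_lock __STL_MUTEX_INITIALIZER;
//   static unsigned long _S_counter = 0;
//
//   void _Bump_counter() {
//     _S_counter_lock._M_acquire_lock();
//     ++_S_counter;
//     _S_counter_lock._M_release_lock();
//   }
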

// A locking class that uses _STL_mutex_lock.  The constructor takes a
// reference to an _STL_mutex_lock, and acquires a lock.  The
// destructor releases the lock.  It's not clear that this is exactly
// the right functionality.  It will probably change in the future.

struct _STL_auto_lock
{
  _STL_mutex_lock& _M_lock;

  _STL_auto_lock(_STL_mutex_lock& __lock) : _M_lock(__lock)
    { _M_lock._M_acquire_lock(); }
  ~_STL_auto_lock() { _M_lock._M_release_lock(); }

private:
  void operator=(const _STL_auto_lock&);
  _STL_auto_lock(const _STL_auto_lock&);
};

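// Illustrative usage sketch (not part of the original header): the
// scope-based idiom this class supports, using the hypothetical
// _S_counter_lock and _S_counter from the sketch above.
//
//   void _Bump_counter_scoped() {
//     _STL_auto_lock __guard(_S_counter_lock);  // acquired here
//     ++_S_counter;
//   }                                           // released on scope exit
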
} // namespace std

#endif /* __SGI_STL_INTERNAL_THREADS_H */

// Local Variables:
// mode:C++
// End: