// Copyright (C) 2002-2015 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.

// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

// Written by Mark Mitchell, CodeSourcery LLC, <mark@codesourcery.com>
// Thread support written by Jason Merrill, Red Hat Inc. <jason@redhat.com>

#include <bits/c++config.h>
#include <cxxabi.h>
#include <exception>
#include <new>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#if defined(__GTHREADS) && defined(__GTHREAD_HAS_COND) \
  && (ATOMIC_INT_LOCK_FREE > 1) && defined(_GLIBCXX_HAVE_LINUX_FUTEX)
# include <climits>
# include <syscall.h>
# include <unistd.h>
# define _GLIBCXX_USE_FUTEX
# define _GLIBCXX_FUTEX_WAIT 0
# define _GLIBCXX_FUTEX_WAKE 1
#endif

// The IA64/generic ABI uses the first byte of the guard variable.
// The ARM EABI uses the least significant bit.

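// For illustration only (a sketch added for exposition, not part of the
// upstream file): under the generic IA64-style layout the guard is an
// 8-byte object whose first byte records whether initialization has
// completed, so the _GLIBCXX_GUARD_TEST / _GLIBCXX_GUARD_SET macros used
// below amount to roughly the following; the exact definitions come from
// the per-target cxxabi_tweaks.h headers and may differ on your target.
//
//   bool guard_test (__cxxabiv1::__guard *g)   // already initialized?
//   { return *(char *) g != 0; }
//
//   void guard_set (__cxxabiv1::__guard *g)    // mark as initialized
//   { *(char *) g = 1; }
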
// Thread-safe static local initialization support.
#ifdef __GTHREADS
# ifndef _GLIBCXX_USE_FUTEX
namespace
{
  // A single mutex controlling all static initializations.
  static __gnu_cxx::__recursive_mutex* static_mutex;

  typedef char fake_recursive_mutex[sizeof(__gnu_cxx::__recursive_mutex)]
  __attribute__ ((aligned(__alignof__(__gnu_cxx::__recursive_mutex))));
  fake_recursive_mutex fake_mutex;

  static void init()
  { static_mutex = new (&fake_mutex) __gnu_cxx::__recursive_mutex(); }

  __gnu_cxx::__recursive_mutex&
  get_static_mutex()
  {
    static __gthread_once_t once = __GTHREAD_ONCE_INIT;
    __gthread_once(&once, init);
    return *static_mutex;
  }

  // Simple wrapper for exception safety.
  struct mutex_wrapper
  {
    bool unlock;
    mutex_wrapper() : unlock(true)
    { get_static_mutex().lock(); }

    ~mutex_wrapper()
    {
      if (unlock)
        static_mutex->unlock();
    }
  };
}
# endif

# if defined(__GTHREAD_HAS_COND) && !defined(_GLIBCXX_USE_FUTEX)
namespace
{
  // A single condition variable controlling all static initializations.
  static __gnu_cxx::__cond* static_cond;

  // Using a fake type to avoid initializing a static class.
  typedef char fake_cond_t[sizeof(__gnu_cxx::__cond)]
  __attribute__ ((aligned(__alignof__(__gnu_cxx::__cond))));
  fake_cond_t fake_cond;

  static void init_static_cond()
  { static_cond = new (&fake_cond) __gnu_cxx::__cond(); }

  __gnu_cxx::__cond&
  get_static_cond()
  {
    static __gthread_once_t once = __GTHREAD_ONCE_INIT;
    __gthread_once(&once, init_static_cond);
    return *static_cond;
  }
}
# endif

# ifndef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
inline bool
__test_and_acquire (__cxxabiv1::__guard *g)
{
  bool b = _GLIBCXX_GUARD_TEST (g);
  _GLIBCXX_READ_MEM_BARRIER;
  return b;
}
#  define _GLIBCXX_GUARD_TEST_AND_ACQUIRE(G) __test_and_acquire (G)
# endif

# ifndef _GLIBCXX_GUARD_SET_AND_RELEASE
inline void
__set_and_release (__cxxabiv1::__guard *g)
{
  _GLIBCXX_WRITE_MEM_BARRIER;
  _GLIBCXX_GUARD_SET (g);
}
#  define _GLIBCXX_GUARD_SET_AND_RELEASE(G) __set_and_release (G)
# endif

#else /* !__GTHREADS */

# undef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
# undef _GLIBCXX_GUARD_SET_AND_RELEASE
# define _GLIBCXX_GUARD_SET_AND_RELEASE(G) _GLIBCXX_GUARD_SET (G)

#endif /* __GTHREADS */

//
// Here are the C++ run-time routines for guarded initialization of static
// variables. There are four scenarios under which these routines are called:
//
//   1. Threads are not supported (__GTHREADS is not defined).
//   2. Threads are supported but not enabled at run-time.
//   3. Threads are enabled at run-time, but __gthread_* is not fully POSIX.
//   4. Threads are enabled at run-time, and __gthread_* supports all the
//      POSIX threads primitives we need here.
//
// The old code supported scenarios 1-3 but was broken, since it used a global
// mutex for all threads and kept that mutex locked for the whole duration of
// a guarded static variable's initialization. The following sequence created
// a deadlock with the old code (a sketch follows below):
//
//      Thread 1 acquires the global mutex.
//      Thread 1 starts initializing a static variable.
//      Thread 1 creates thread 2 during that initialization.
//      Thread 2 attempts to acquire the mutex to initialize another variable.
//      Thread 2 blocks, since thread 1 is holding the mutex.
//      Thread 1 waits for a result from thread 2 and also blocks. A deadlock.
//
// The new code here can handle this situation and thus is more robust.
// However, it needs a POSIX-style thread condition variable, which is not
// supported on all platforms, notably older versions of Microsoft Windows.
// The gthr*.h headers define the symbol __GTHREAD_HAS_COND for platforms
// that support POSIX-like condition variables. For platforms that do not,
// we fall back to the old code.

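// A minimal sketch of the deadlocking pattern described above (for
// exposition only, not part of the upstream file; it uses C++11 <thread>
// purely for brevity, and slow_init() stands in for any non-constant
// initializer):
//
//   int slow_init ();                          // hypothetical helper
//
//   int inner_value ()                         // called from thread 2
//   {
//     static int inner = slow_init ();         // needs the global guard mutex
//     return inner;
//   }
//
//   int outer_value ()                         // called from thread 1
//   {
//     static int outer = [] {                  // old code: global mutex held
//       int result = 0;                        // for this whole initializer
//       std::thread t ([&] { result = inner_value (); });
//       t.join ();                             // thread 1 waits on thread 2,
//       return result;                         // which waits on the mutex:
//     } ();                                    // a deadlock
//     return outer;
//   }
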
// If _GLIBCXX_USE_FUTEX is defined, no global mutex or condition variable
// is used; only atomic operations are used, together with the futex syscall.
// Valid values of the first integer in the guard are:
// 0                              No thread has encountered the guarded init
//                                yet, or it has been aborted.
// _GLIBCXX_GUARD_BIT             The guarded static var has been successfully
//                                initialized.
// _GLIBCXX_GUARD_PENDING_BIT     The guarded static var is being initialized
//                                and no other thread is waiting for its
//                                initialization.
// (_GLIBCXX_GUARD_PENDING_BIT    The guarded static var is being initialized
//  | _GLIBCXX_GUARD_WAITING_BIT) and some other threads are waiting until
//                                it is initialized.

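// For reference, a sketch of how a compiler lowers a guarded function-local
// static under the Itanium C++ ABI (illustrative pseudo-source only, not
// part of the upstream file; "widget" and "make_widget" are made-up names,
// and real compilers inline the fast-path guard test rather than calling a
// macro):
//
//   // Source:  static widget w = make_widget ();
//   // Becomes, roughly:
//   static widget w;                            // zero-initialized storage
//   static __cxxabiv1::__guard w_guard;         // zero-initialized guard
//   if (!_GLIBCXX_GUARD_TEST (&w_guard)
//       && __cxa_guard_acquire (&w_guard))      // returns 1: we must initialize
//     {
//       try
//         {
//           new (&w) widget (make_widget ());   // run the initializer once
//           __cxa_guard_release (&w_guard);     // publish; wakes any waiters
//         }
//       catch (...)
//         {
//           __cxa_guard_abort (&w_guard);       // undo; another thread may retry
//           throw;
//         }
//     }
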
namespace __cxxabiv1
{
#ifdef _GLIBCXX_USE_FUTEX
  namespace
  {
    static inline int __guard_test_bit (const int __byte, const int __val)
    {
      union { int __i; char __c[sizeof (int)]; } __u = { 0 };
      __u.__c[__byte] = __val;
      return __u.__i;
    }
  }
#endif

  static inline int
  init_in_progress_flag(__guard* g)
  { return ((char *)g)[1]; }

  static inline void
  set_init_in_progress_flag(__guard* g, int v)
  { ((char *)g)[1] = v; }

  static inline void
  throw_recursive_init_exception()
  {
#if __cpp_exceptions
        throw __gnu_cxx::recursive_init_error();
#else
        // Use __builtin_trap so we don't require abort().
        __builtin_trap();
#endif
  }

  // acquire() is a helper function used to acquire the guard if thread
  // support is not compiled in, or is compiled in but not enabled at
  // run-time.
  static int
  acquire(__guard *g)
  {
    // Quit if the object is already initialized.
    if (_GLIBCXX_GUARD_TEST(g))
      return 0;

    if (init_in_progress_flag(g))
      throw_recursive_init_exception();

    set_init_in_progress_flag(g, 1);
    return 1;
  }

  extern "C"
  int __cxa_guard_acquire (__guard *g)
  {
#ifdef __GTHREADS
    // If the target can reorder loads, we need to insert a read memory
    // barrier so that accesses to the guarded variable happen after the
    // guard test.
    if (_GLIBCXX_GUARD_TEST_AND_ACQUIRE (g))
      return 0;

# ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and the futex syscall are supported, don't use any
    // global mutex.
    if (__gthread_active_p ())
      {
        int *gi = (int *) (void *) g;
        const int guard_bit = _GLIBCXX_GUARD_BIT;
        const int pending_bit = _GLIBCXX_GUARD_PENDING_BIT;
        const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;

        while (1)
          {
            int expected(0);
            if (__atomic_compare_exchange_n(gi, &expected, pending_bit, false,
                                            __ATOMIC_ACQ_REL,
                                            __ATOMIC_ACQUIRE))
              {
                // This thread should do the initialization.
                return 1;
              }

            if (expected == guard_bit)
              {
                // Already initialized.
                return 0;
              }

            if (expected == pending_bit)
              {
                // Use acquire here.
                int newv = expected | waiting_bit;
                if (!__atomic_compare_exchange_n(gi, &expected, newv, false,
                                                 __ATOMIC_ACQ_REL,
                                                 __ATOMIC_ACQUIRE))
                  {
                    if (expected == guard_bit)
                      {
                        // Make a thread that failed to set the
                        // waiting bit exit the function earlier,
                        // if it detects that another thread has
                        // successfully finished initialising.
                        return 0;
                      }
                    if (expected == 0)
                      continue;
                  }

                expected = newv;
              }

            syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAIT, expected, 0);
          }
      }
# else
    if (__gthread_active_p ())
      {
        mutex_wrapper mw;

        while (1)       // When this loop is executing, the mutex is locked.
          {
#  ifdef __GTHREAD_HAS_COND
            // The static is already initialized.
            if (_GLIBCXX_GUARD_TEST(g))
              return 0; // The mutex will be unlocked via the wrapper.

            if (init_in_progress_flag(g))
              {
                // The guarded static is currently being initialized by
                // another thread, so we release the mutex and wait for the
                // condition variable. We will lock the mutex again after
                // this.
                get_static_cond().wait_recursive(&get_static_mutex());
              }
            else
              {
                set_init_in_progress_flag(g, 1);
                return 1; // The mutex will be unlocked via the wrapper.
              }
#  else
            // This provides compatibility with older systems not supporting
            // POSIX-like condition variables.
            if (acquire(g))
              {
                mw.unlock = false;
                return 1; // The mutex is still locked.
              }
            return 0; // The mutex will be unlocked via the wrapper.
#  endif
          }
      }
# endif
#endif

    return acquire (g);
  }

  extern "C"
  void __cxa_guard_abort (__guard *g) throw ()
  {
#ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and the futex syscall are supported, don't use any
    // global mutex.
    if (__gthread_active_p ())
      {
        int *gi = (int *) (void *) g;
        const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;
        int old = __atomic_exchange_n (gi, 0, __ATOMIC_ACQ_REL);

        if ((old & waiting_bit) != 0)
          syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAKE, INT_MAX);
        return;
      }
#elif defined(__GTHREAD_HAS_COND)
    if (__gthread_active_p())
      {
        mutex_wrapper mw;

        set_init_in_progress_flag(g, 0);

        // If we abort, we still need to wake up all other threads waiting for
        // the condition variable.
        get_static_cond().broadcast();
        return;
      }
#endif

    set_init_in_progress_flag(g, 0);
#if defined(__GTHREADS) && !defined(__GTHREAD_HAS_COND)
    // This provides compatibility with older systems not supporting
    // POSIX-like condition variables.
    if (__gthread_active_p ())
      static_mutex->unlock();
#endif
  }

  extern "C"
  void __cxa_guard_release (__guard *g) throw ()
  {
#ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and the futex syscall are supported, don't use any
    // global mutex.
    if (__gthread_active_p ())
      {
        int *gi = (int *) (void *) g;
        const int guard_bit = _GLIBCXX_GUARD_BIT;
        const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;
        int old = __atomic_exchange_n (gi, guard_bit, __ATOMIC_ACQ_REL);

        if ((old & waiting_bit) != 0)
          syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAKE, INT_MAX);
        return;
      }
#elif defined(__GTHREAD_HAS_COND)
    if (__gthread_active_p())
      {
        mutex_wrapper mw;

        set_init_in_progress_flag(g, 0);
        _GLIBCXX_GUARD_SET_AND_RELEASE(g);

        get_static_cond().broadcast();
        return;
      }
#endif

    set_init_in_progress_flag(g, 0);
    _GLIBCXX_GUARD_SET_AND_RELEASE (g);

#if defined(__GTHREADS) && !defined(__GTHREAD_HAS_COND)
    // This provides compatibility with older systems not supporting
    // POSIX-like condition variables.
    if (__gthread_active_p())
      static_mutex->unlock();
#endif
  }
}