Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /**
  2.  * Many similar implementations exist. See for example libwsbm
  3.  * or the linux kernel include/atomic.h
  4.  *
  5.  * No copyright claimed on this file.
  6.  *
  7.  */
  8.  
  9. #include "no_extern_c.h"
  10.  
  11. #ifndef U_ATOMIC_H
  12. #define U_ATOMIC_H
  13.  
  14. #include <stdbool.h>
  15.  
/* Favor OS-provided implementations.
 *
 * Where no OS-provided implementation is available, fall back to
 * locally coded assembly, compiler intrinsic or ultimately a
 * mutex-based implementation.
 */
#if defined(__sun)
/* Solaris: use the <atomic.h> OS primitives. */
#define PIPE_ATOMIC_OS_SOLARIS
#elif defined(_MSC_VER)
/* MSVC: use the Interlocked* intrinsics / Windows API wrappers. */
#define PIPE_ATOMIC_MSVC_INTRINSIC
#elif defined(__GNUC__)
/* GCC/Clang: use the __sync_* builtins. */
#define PIPE_ATOMIC_GCC_INTRINSIC
#else
#error "Unsupported platform"
#endif
  31.  
  32.  
/* Implementation using GCC-provided synchronization intrinsics
 */
#if defined(PIPE_ATOMIC_GCC_INTRINSIC)

#define PIPE_ATOMIC "GCC Sync Intrinsics"

/* Plain load/store: set/read are not fenced; callers rely on naturally
 * aligned word accesses, as in the other backends below. */
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
/* Atomically decrement; true iff the new value is zero. */
#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
/* Atomic increment/decrement/add, result discarded. */
#define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
#define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
/* Atomic increment/decrement returning the NEW value. */
#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
/* Compare-and-swap: if *v == old then *v = _new; returns the value *v
 * held before the operation (== old on success). */
#define p_atomic_cmpxchg(v, old, _new) \
   __sync_val_compare_and_swap((v), (old), (_new))

#endif
  51.  
  52.  
  53.  
/* Unlocked version for single threaded environments, such as some
 * windows kernel modules.
 */
#if defined(PIPE_ATOMIC_OS_UNLOCKED)

#define PIPE_ATOMIC "Unlocked"

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
#define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
#define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
#define p_atomic_add(_v, _i) (*(_v) = *(_v) + (_i))
#define p_atomic_inc_return(_v) (++(*(_v)))
#define p_atomic_dec_return(_v) (--(*(_v)))
/* NOTE: evaluates _v several times — acceptable only because this backend
 * is explicitly for single-threaded use and _v is expected to be a plain
 * lvalue expression without side effects. */
#define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))

#endif
  72.  
  73.  
  74. #if defined(PIPE_ATOMIC_MSVC_INTRINSIC)
  75.  
  76. #define PIPE_ATOMIC "MSVC Intrinsics"
  77.  
  78. /* We use the Windows header's Interlocked*64 functions instead of the
  79.  * _Interlocked*64 intrinsics wherever we can, as support for the latter varies
  80.  * with target CPU, whereas Windows headers take care of all portability
  81.  * issues: using intrinsics where available, falling back to library
  82.  * implementations where not.
  83.  */
  84. #ifndef WIN32_LEAN_AND_MEAN
  85. #define WIN32_LEAN_AND_MEAN 1
  86. #endif
  87. #include <windows.h>
  88. #include <intrin.h>
  89. #include <assert.h>
  90.  
  91. #if _MSC_VER < 1600
  92.  
  93. /* Implement _InterlockedCompareExchange8 in terms of _InterlockedCompareExchange16 */
  94. static __inline char
  95. _InterlockedCompareExchange8(char volatile *destination8, char exchange8, char comparand8)
  96. {
  97.    INT_PTR destinationAddr = (INT_PTR)destination8;
  98.    short volatile *destination16 = (short volatile *)(destinationAddr & ~1);
  99.    const short shift8 = (destinationAddr & 1) * 8;
  100.    const short mask8 = 0xff << shift8;
  101.    short initial16 = *destination16;
  102.    char initial8 = initial16 >> shift8;
  103.    while (initial8 == comparand8) {
  104.       /* initial *destination8 matches, so try exchange it while keeping the
  105.        * neighboring byte untouched */
  106.       short exchange16 = (initial16 & ~mask8) | ((short)exchange8 << shift8);
  107.       short comparand16 = initial16;
  108.       short initial16 = _InterlockedCompareExchange16(destination16, exchange16, comparand16);
  109.       if (initial16 == comparand16) {
  110.          /* succeeded */
  111.          return comparand8;
  112.       }
  113.       /* something changed, retry with the new initial value */
  114.       initial8 = initial16 >> shift8;
  115.    }
  116.    return initial8;
  117. }
  118.  
  119. /* Implement _InterlockedExchangeAdd16 in terms of _InterlockedCompareExchange16 */
  120. static __inline short
  121. _InterlockedExchangeAdd16(short volatile *addend, short value)
  122. {
  123.    short initial = *addend;
  124.    short comparand;
  125.    do {
  126.       short exchange = initial + value;
  127.       comparand = initial;
  128.       /* if *addend==comparand then *addend=exchange, return original *addend */
  129.       initial = _InterlockedCompareExchange16(addend, exchange, comparand);
  130.    } while(initial != comparand);
  131.    return comparand;
  132. }
  133.  
  134. /* Implement _InterlockedExchangeAdd8 in terms of _InterlockedCompareExchange8 */
  135. static __inline char
  136. _InterlockedExchangeAdd8(char volatile *addend, char value)
  137. {
  138.    char initial = *addend;
  139.    char comparand;
  140.    do {
  141.       char exchange = initial + value;
  142.       comparand = initial;
  143.       initial = _InterlockedCompareExchange8(addend, exchange, comparand);
  144.    } while(initial != comparand);
  145.    return comparand;
  146. }
  147.  
  148. #endif /* _MSC_VER < 1600 */
  149.  
/* MSVC supports decltype keyword, but it's only supported on C++ and doesn't
 * quite work here; and if a C++-only solution is worthwhile, then it would be
 * better to use templates / function overloading, instead of decltype magic.
 * Therefore, we rely on implicit casting to LONGLONG for the functions that return
 */

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))

#define p_atomic_dec_zero(_v) \
   (p_atomic_dec_return(_v) == 0)

#define p_atomic_inc(_v) \
   ((void) p_atomic_inc_return(_v))

/* Dispatch on operand size at compile time; sizeof comparisons fold to
 * constants, so only one Interlocked call survives.  Note: no 8-bit
 * variant — there is no _InterlockedIncrement8. */
#define p_atomic_inc_return(_v) (\
   sizeof *(_v) == sizeof(short)   ? _InterlockedIncrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedIncrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedIncrement64 ((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))

#define p_atomic_dec(_v) \
   ((void) p_atomic_dec_return(_v))

#define p_atomic_dec_return(_v) (\
   sizeof *(_v) == sizeof(short)   ? _InterlockedDecrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedDecrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedDecrement64 ((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))

/* Returns the PREVIOUS value (ExchangeAdd semantics), unlike
 * inc/dec_return above which return the new value. */
#define p_atomic_add(_v, _i) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedExchangeAdd8 ((char *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedExchangeAdd16((short *)  (_v), (_i)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedExchangeAdd  ((long *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64((__int64 *)(_v), (_i)) : \
                                     (assert(!"should not get here"), 0))

/* Note the Interlocked argument order: (dest, exchange, comparand) —
 * i.e. (_new, _old) — while p_atomic_cmpxchg takes (_old, _new). */
#define p_atomic_cmpxchg(_v, _old, _new) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedCompareExchange8 ((char *)   (_v), (char)   (_new), (char)   (_old)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedCompareExchange16((short *)  (_v), (short)  (_new), (short)  (_old)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedCompareExchange  ((long *)   (_v), (long)   (_new), (long)   (_old)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedCompareExchange64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
                                     (assert(!"should not get here"), 0))
  193.  
  194. #endif
  195.  
#if defined(PIPE_ATOMIC_OS_SOLARIS)

#define PIPE_ATOMIC "Solaris OS atomic functions"

#include <atomic.h>
#include <assert.h>

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))

/* Size-dispatched wrappers over Solaris atomic_ops(3C).  The sizeof
 * comparisons are compile-time constants, so only one branch survives.
 * The *_nv variants return the NEW value after the operation. */
#define p_atomic_dec_zero(v) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_inc(v) (void) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

/* Cast back to the operand's type so the caller sees a value of the
 * same type it passed in (GNU __typeof extension). */
#define p_atomic_inc_return(v) ((__typeof(*v)) \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec(v) ((void) \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec_return(v) ((__typeof(*v)) \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_add(v, i) ((void)                                   \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8 ((uint8_t  *)(v), (i)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
                                    (assert(!"should not get here"), 0))

/* atomic_cas_* returns the value *v held before the operation
 * (== old on success), matching the other backends. */
#define p_atomic_cmpxchg(v, old, _new) ((__typeof(*v)) \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_cas_8 ((uint8_t  *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
                                    (assert(!"should not get here"), 0))

#endif
  256.  
  257. #ifndef PIPE_ATOMIC
  258. #error "No pipe_atomic implementation selected"
  259. #endif
  260.  
  261.  
  262.  
  263. #endif /* U_ATOMIC_H */
  264.