
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef KERN_ATOMIC_H_
#define KERN_ATOMIC_H_

typedef struct atomic {
        volatile long count;
} atomic_t;

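/*
 * Illustrative note (not part of the original header): a counter of this
 * type would typically be initialised either statically or via atomic_set()
 * declared further below, e.g.
 *
 *     static atomic_t ready_threads = { 0 };
 */
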
static inline void atomic_inc(atomic_t *val) {
#ifdef USE_SMP
  asm volatile ("lock inc %0\n" : "+m" (val->count));
#else
  asm volatile ("inc %0\n" : "+m" (val->count));
#endif /* USE_SMP */
}

static inline void atomic_dec(atomic_t *val) {
#ifdef USE_SMP
  asm volatile ("lock dec %0\n" : "+m" (val->count));
#else
  asm volatile ("dec %0\n" : "+m" (val->count));
#endif /* USE_SMP */
}

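/*
 * Usage sketch (illustrative only, not taken from this header): a simple
 * shared reference count built on the two primitives above. The names
 * object_get/object_put are hypothetical.
 *
 *     static atomic_t refcount;
 *
 *     static inline void object_get(void) { atomic_inc(&refcount); }
 *     static inline void object_put(void) { atomic_dec(&refcount); }
 */
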
/*
static inline long atomic_postinc(atomic_t *val)
{
        long r = 1;

        asm volatile (
                "lock xadd %1, %0\n"
                : "+m" (val->count), "+r" (r)
        );

        return r;
}

static inline long atomic_postdec(atomic_t *val)
{
        long r = -1;

        asm volatile (
                "lock xadd %1, %0\n"
                : "+m" (val->count), "+r" (r)
        );

        return r;
}

#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)

static inline u32_t test_and_set(atomic_t *val) {
        u32_t v;

        asm volatile (
                "movl $1, %0\n"
                "xchgl %0, %1\n"
                : "=r" (v), "+m" (val->count)
        );

        return v;
}
*/

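/*
 * Illustrative note (not from the original source): if the block above were
 * re-enabled, atomic_postinc() would return the value held *before* the
 * increment, which is the usual building block for handing out unique ids:
 *
 *     static atomic_t next_id;
 *     long id = atomic_postinc(&next_id);
 */
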
/* ia32 specific fast spinlock */

static inline void atomic_lock_arch(atomic_t *val)
{
        u32_t tmp;

//      preemption_disable();
        asm volatile (
                "0:\n"
                "pause\n\t"        /* the PAUSE hint is friendly to Pentium 4 HyperThreading */
                "mov %1, [%0]\n\t"
                "test %1, %1\n\t"
                "jnz 0b\n\t"       /* lightweight looping while the spinlock is held */

                "inc %1\n\t"       /* now use the atomic operation */
                "xchg [%0], %1\n\t"
                "test %1, %1\n\t"
                "jnz 0b\n\t"
                : "+m" (val->count), "=r" (tmp)
        );
        /*
         * Prevent critical section code from bleeding out this way up.
         */
//      CS_ENTER_BARRIER();
}

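/*
 * Usage sketch (illustrative only, not part of the original header): this
 * file declares no unlock routine, so the matching release is assumed here
 * to be a plain reset of the counter with atomic_set() below.
 *
 *     static atomic_t big_lock;
 *
 *     atomic_lock_arch(&big_lock);
 *     ... critical section ...
 *     atomic_set(&big_lock, 0);
 */
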
static inline void atomic_set(atomic_t *val, long i)
{
        val->count = i;
}

static inline long atomic_get(atomic_t *val)
{
        return val->count;
}

#endif  /*  KERN_ATOMIC_H_  */