Subversion Repositories Kolibri OS

Rev

Rev 854 | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright (c) 2006 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29.  
  30. #ifndef KERN_ATOMIC_H_
  31. #define KERN_ATOMIC_H_
  32.  
/*
 * Architecture-specific atomic counter.  Callers must manipulate it only
 * through the atomic_* helpers in this header.  `count` is volatile so the
 * plain load/store helpers are not cached in registers by the compiler.
 * NOTE(review): volatile alone does not make accesses atomic across CPUs --
 * the read-modify-write helpers rely on inline asm (lock prefix) for that.
 */
typedef struct atomic {
        volatile long count;
} atomic_t;
  36.  
  37. static inline void atomic_inc(atomic_t *val) {
  38. #ifdef USE_SMP
  39.   asm volatile ("lock incl %0\n" : "+m" (val->count));
  40. #else
  41.   asm volatile ("incl %0\n" : "+m" (val->count));
  42. #endif /* USE_SMP */
  43. }
  44.  
  45. static inline void atomic_dec(atomic_t *val) {
  46. #ifdef USE_SMP
  47.   asm volatile ("lock decl %0\n" : "+m" (val->count));
  48. #else
  49.   asm volatile ("decl %0\n" : "+m" (val->count));
  50. #endif /* USE_SMP */
  51. }
  52.  
  53. /*
  54. static inline long atomic_postinc(atomic_t *val)
  55. {
  56.         long r = 1;
  57.  
  58.         asm volatile (
  59.     "lock xadd %1, %0\n"
  60.                 : "+m" (val->count), "+r" (r)
  61.         );
  62.  
  63.         return r;
  64. }
  65.  
  66. static inline long atomic_postdec(atomic_t *val)
  67. {
  68.         long r = -1;
  69.  
  70.         asm volatile (
  71.     "lock xadd %1, %0\n"
  72.                 : "+m" (val->count), "+r"(r)
  73.         );
  74.  
  75.         return r;
  76. }
  77.  
  78. #define atomic_preinc(val) (atomic_postinc(val) + 1)
  79. #define atomic_predec(val) (atomic_postdec(val) - 1)
  80.  
  81. static inline u32_t test_and_set(atomic_t *val) {
  82.         uint32_t v;
  83.  
  84.         asm volatile (
  85.                 "movl $1, %0\n"
  86.                 "xchgl %0, %1\n"
  87.                 : "=r" (v),"+m" (val->count)
  88.         );
  89.  
  90.         return v;
  91. }
  92. */
  93.  
  94. /* ia32 specific fast spinlock */
  95.  
  96. static inline void atomic_lock_arch(atomic_t *val)
  97. {
  98.   u32_t tmp;
  99.  
  100. //  preemption_disable();
  101.  
  102.     asm volatile (
  103.                 "0:\n"
  104.                 "pause\n" /* Pentium 4's HT love this instruction */
  105.                 "mov %0, %1\n"
  106.                 "testl %1, %1\n"
  107.                 "jnz 0b\n"       /* lightweight looping on locked spinlock */
  108.  
  109.                 "incl %1\n"      /* now use the atomic operation */
  110.                 "xchgl %0, %1\n"
  111.                 "testl %1, %1\n"
  112.                 "jnz 0b\n"
  113.     : "+m" (val->count), "=&r"(tmp)
  114.   );
  115.  // CS_ENTER_BARRIER();
  116. }
  117.  
  118. static inline void atomic_set(atomic_t *val, long i)
  119. {
  120.         val->count = i;
  121. }
  122.  
  123. static inline long atomic_get(atomic_t *val)
  124. {
  125.         return val->count;
  126. }
  127.  
  128. #endif  /*  KERN_ATOMIC_H_  */
  129.  
  130.