Subversion Repositories Kolibri OS

Rev

Rev 1408 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. #ifndef __LINUX_SPINLOCK_UP_H
  2. #define __LINUX_SPINLOCK_UP_H
  3.  
  4. #ifndef __LINUX_SPINLOCK_H
  5. # error "please don't include this file directly"
  6. #endif
  7.  
  8. /*
  9.  * include/linux/spinlock_up.h - UP-debug version of spinlocks.
  10.  *
  11.  * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
  12.  * Released under the General Public License (GPL).
  13.  *
  14.  * In the debug case, 1 means unlocked, 0 means locked. (the values
  15.  * are inverted, to catch initialization bugs)
  16.  *
  17.  * No atomicity anywhere, we are on UP. However, we still need
  18.  * the compiler barriers, because we do not want the compiler to
  19.  * move potentially faulting instructions (notably user accesses)
  20.  * into the locked sequence, resulting in non-atomic execution.
  21.  */
  22.  
#ifdef CONFIG_DEBUG_SPINLOCK
/* Debug encoding is inverted (see header comment): slock == 0 means held. */
#define __raw_spin_is_locked(x)         ((x)->slock == 0)

/*
 * Acquire the lock.  On UP no atomic instruction is needed; we simply
 * mark the lock held (0 == locked in the inverted debug encoding).
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        lock->slock = 0;
}
  30.  
/*
 * Acquire the lock with local interrupts disabled.
 *
 * NOTE(review): 'flags' is passed by value, so the IRQ state that
 * local_irq_save() stores into it never reaches the caller -- only the
 * interrupt-disable side effect takes hold.  This mirrors upstream
 * Linux spinlock_up.h; confirm no caller expects 'flags' written back.
 */
static inline void
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
        local_irq_save(flags);
        lock->slock = 0;
}
  37.  
  38. static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  39. {
  40.         char oldval = lock->slock;
  41.  
  42.         lock->slock = 0;
  43.  
  44.         return oldval > 0;
  45. }
  46.  
/* Release the lock: 1 == unlocked in the inverted debug encoding. */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        lock->slock = 1;
}
  51.  
/*
 * Read-write spinlocks. No debug version.
 *
 * On UP these reduce to no-ops; the (void)(lock) evaluates the argument
 * once (keeping it a required, warning-free expression) and discards it.
 */
#define __raw_read_lock(lock)           do { (void)(lock); } while (0)
#define __raw_write_lock(lock)          do { (void)(lock); } while (0)
#define __raw_read_trylock(lock)        ({ (void)(lock); 1; })
#define __raw_write_trylock(lock)       ({ (void)(lock); 1; })
#define __raw_read_unlock(lock)         do { (void)(lock); } while (0)
#define __raw_write_unlock(lock)        do { (void)(lock); } while (0)

  61.  
#else /* DEBUG_SPINLOCK */
/* Non-debug UP build: all spinlock operations compile away to nothing. */
#define __raw_spin_is_locked(lock)      ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
# define __raw_spin_lock(lock)          do { (void)(lock); } while (0)
# define __raw_spin_lock_flags(lock, flags)     do { (void)(lock); } while (0)
# define __raw_spin_unlock(lock)        do { (void)(lock); } while (0)
# define __raw_spin_trylock(lock)       ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
  70.  
/* A UP spinlock can never be contended: there is no other CPU to hold it. */
#define __raw_spin_is_contended(lock)   (((void)(lock), 0))

/* rwlocks are always acquirable on UP. */
#define __raw_read_can_lock(lock)       (((void)(lock), 1))
#define __raw_write_can_lock(lock)      (((void)(lock), 1))

/*
 * Busy-wait until the lock is released.  Note the do/while shape: one
 * cpu_relax() is issued even when the lock is already free on entry.
 */
#define __raw_spin_unlock_wait(lock) \
                do { cpu_relax(); } while (__raw_spin_is_locked(lock))

  79. #endif /* __LINUX_SPINLOCK_UP_H */
  80.