Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | Download | RSS feed

  1. #ifndef __LINUX_SPINLOCK_H
  2. #define __LINUX_SPINLOCK_H
  3.  
  4. /*
  5.  * include/linux/spinlock.h - generic spinlock/rwlock declarations
  6.  *
  7.  * here's the role of the various spinlock/rwlock related include files:
  8.  *
  9.  * on SMP builds:
  10.  *
  11.  *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
  12.  *                        initializers
  13.  *
  14.  *  linux/spinlock_types.h:
  15.  *                        defines the generic type and initializers
  16.  *
  17.  *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
  18.  *                        implementations, mostly inline assembly code
  19.  *
  20.  *   (also included on UP-debug builds:)
  21.  *
  22.  *  linux/spinlock_api_smp.h:
  23.  *                        contains the prototypes for the _spin_*() APIs.
  24.  *
  25.  *  linux/spinlock.h:     builds the final spin_*() APIs.
  26.  *
  27.  * on UP builds:
  28.  *
  29.  *  linux/spinlock_type_up.h:
  30.  *                        contains the generic, simplified UP spinlock type.
  31.  *                        (which is an empty structure on non-debug builds)
  32.  *
  33.  *  linux/spinlock_types.h:
  34.  *                        defines the generic type and initializers
  35.  *
  36.  *  linux/spinlock_up.h:
  37.  *                        contains the __raw_spin_*()/etc. version of UP
  38.  *                        builds. (which are NOPs on non-debug, non-preempt
  39.  *                        builds)
  40.  *
  41.  *   (included on UP-non-debug builds:)
  42.  *
  43.  *  linux/spinlock_api_up.h:
  44.  *                        builds the _spin_*() APIs.
  45.  *
  46.  *  linux/spinlock.h:     builds the final spin_*() APIs.
  47.  */
  48.  
  49. #include <linux/typecheck.h>
  50. //#include <linux/preempt.h>
  51. //#include <linux/linkage.h>
  52. #include <linux/compiler.h>
  53. //#include <linux/thread_info.h>
  54. #include <linux/kernel.h>
  55. #include <linux/stringify.h>
  56. //#include <linux/bottom_half.h>
  57.  
  58. //#include <asm/system.h>
  59.  
/*
 * Must define these before including other files, inline functions need them
 */
/*
 * Name of the separate text section used for out-of-line lock code;
 * one section per object file (KBUILD_BASENAME is the object's name).
 */
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME

/*
 * Inline-assembly fragments that switch into (and back out of) that
 * section: LOCK_SECTION_START emits ".subsection 1" plus the section
 * label — defined only once thanks to the ".ifndef" guard — and
 * LOCK_SECTION_END returns to the previous section via ".previous".
 */
#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

/* Tag for lock functions: collects them all into ".spinlock.text". */
#define __lockfunc __attribute__((section(".spinlock.text")))
  76.  
  77. /*
  78.  * Pull the raw_spinlock_t and raw_rwlock_t definitions:
  79.  */
  80. #include <linux/spinlock_types.h>
  81.  
  82. /*
 * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
  84.  */
  85. #ifdef CONFIG_SMP
  86. # include <asm/spinlock.h>
  87. #else
  88. # include <linux/spinlock_up.h>
  89. #endif
  90.  
#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __spin_lock_init(spinlock_t *lock, const char *name,
                               struct lock_class_key *key);
/*
 * Debug build: initialize through the out-of-line __spin_lock_init(),
 * passing the stringified lock name for diagnostics plus a static
 * lock_class_key — the static gives every spin_lock_init() call site
 * its own key object (used by lock debugging/lockdep infrastructure).
 */
# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __spin_lock_init((lock), #lock, &__key);                \
} while (0)

#else
/* Non-debug build: plain assignment of the unlocked initializer. */
# define spin_lock_init(lock)                                   \
        do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
  105.  
#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __rwlock_init(rwlock_t *lock, const char *name,
                            struct lock_class_key *key);
/*
 * Debug build: mirror image of spin_lock_init() for rwlocks — register
 * the lock with its stringified name and a per-call-site static key.
 */
# define rwlock_init(lock)                                      \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __rwlock_init((lock), #lock, &__key);                   \
} while (0)
#else
/* Non-debug build: plain assignment of the unlocked initializer. */
# define rwlock_init(lock)                                      \
        do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
#endif
  119.  
/* Nonzero if the spinlock is currently held. */
#define spin_is_locked(lock)    __raw_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
/* Generic lock-break support keeps an explicit contention flag. */
#define spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef __raw_spin_is_contended
/* Architecture provides its own contention query. */
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#else
/*
 * No contention information available: always report "not contended".
 * The (void)(lock) comma operand still evaluates the argument exactly
 * once, so callers get no unused-variable warnings.
 */
#define spin_is_contended(lock) (((void)(lock), 0))
#endif /*__raw_spin_is_contended*/
#endif
  132.  
/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
/*
 * Generic fallback: issue a full SMP memory barrier after lock
 * acquisition.  An architecture may define ARCH_HAS_SMP_MB_AFTER_LOCK
 * and provide its own version (presumably cheaper when its lock
 * operation already implies a full barrier — defined elsewhere).
 */
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
  137.  
/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 *
 * NOTE(review): per its name this only waits for the lock to be
 * released; it does not acquire it.  Exact semantics come from the
 * arch-level __raw_spin_unlock_wait() — verify there.
 */
#define spin_unlock_wait(lock)  __raw_spin_unlock_wait(&(lock)->raw_lock)
  143.  
#ifdef CONFIG_DEBUG_SPINLOCK
/*
 * Debug build: the _raw_* operations are real out-of-line functions
 * (implemented elsewhere, presumably with debug checking).  The *_flags
 * variants here ignore the flags argument and use the plain lock op.
 */
 extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 extern void _raw_read_lock(rwlock_t *lock);
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);
 extern void _raw_write_lock(rwlock_t *lock);
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
#else
/*
 * Non-debug build: map each _raw_* operation directly onto the
 * arch/UP __raw_* primitive operating on the embedded raw_lock field.
 * The *_flags variants dereference the flags pointer (*(flags)) and
 * pass the saved value by value to the __raw_*_flags primitive.
 */
# define _raw_spin_lock(lock)           __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
                __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock)        __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock)         __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock)         __raw_read_lock(&(rwlock)->raw_lock)
# define _raw_read_lock_flags(lock, flags) \
                __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_read_trylock(rwlock)      __raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)       __raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)        __raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_lock_flags(lock, flags) \
                __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_write_trylock(rwlock)     __raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)      __raw_write_unlock(&(rwlock)->raw_lock)
#endif
  174.  
/* Nonzero if a read (resp. write) lock could be acquired right now. */
#define read_can_lock(rwlock)           __raw_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock)          __raw_write_can_lock(&(rwlock)->raw_lock)
  177.  
/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
 * methods are defined as nops in the case they are not required.
 */
/*
 * NOTE(review): __cond_lock() comes from compiler.h (included above);
 * presumably a static-analysis (sparse) annotation marking that the
 * lock is held only when the trylock returns nonzero — verify there.
 */
#define spin_trylock(lock)              __cond_lock(lock, _spin_trylock(lock))
#define read_trylock(lock)              __cond_lock(lock, _read_trylock(lock))
#define write_trylock(lock)             __cond_lock(lock, _write_trylock(lock))

#define spin_lock(lock)                 _spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lock-allocation debugging: the nested/nest_lock variants feed lockdep
 * the extra nesting information.  typecheck() ensures nest_lock really
 * is an object carrying a dep_map before its address is taken.
 */
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
# define spin_lock_nest_lock(lock, nest_lock)                           \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);     \
         } while (0)
#else
/* Without lock-allocation debugging the nesting hints degenerate to a
 * plain spin_lock(); subclass/nest_lock are ignored. */
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif

#define write_lock(lock)                _write_lock(lock)
#define read_lock(lock)                 _read_lock(lock)
  203.  
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

/*
 * SMP/debug builds: the _*_lock_irqsave() helpers return the saved
 * interrupt flags, so each macro assigns the result to "flags".
 * typecheck() forces "flags" to be an unsigned long lvalue, catching
 * callers that pass the wrong type.
 */
#define spin_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _spin_lock_irqsave(lock);       \
        } while (0)
#define read_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _read_lock_irqsave(lock);       \
        } while (0)
#define write_lock_irqsave(lock, flags)                 \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _write_lock_irqsave(lock);      \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* With lock-allocation debugging, forward the lockdep subclass too. */
#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _spin_lock_irqsave_nested(lock, subclass);      \
        } while (0)
#else
/* Otherwise the subclass is ignored. */
#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _spin_lock_irqsave(lock);                       \
        } while (0)
#endif

#else

/*
 * UP non-debug builds: here the _*_lock_irqsave() helpers take "flags"
 * by name and store into it themselves (macro/by-reference style), so
 * there is no assignment from a return value.
 */
#define spin_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _spin_lock_irqsave(lock, flags);        \
        } while (0)
#define read_lock_irqsave(lock, flags)                  \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _read_lock_irqsave(lock, flags);        \
        } while (0)
#define write_lock_irqsave(lock, flags)                 \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _write_lock_irqsave(lock, flags);       \
        } while (0)
/* No lockdep on this path: nesting info is irrelevant. */
#define spin_lock_irqsave_nested(lock, flags, subclass) \
        spin_lock_irqsave(lock, flags)

#endif
  257.  
/*
 * One-to-one mapping of the public lock/unlock API onto the _spin_*()/
 * _read_*()/_write_*() implementations pulled in at the bottom of this
 * file (spinlock_api_smp.h or spinlock_api_up.h).
 */
#define spin_lock_irq(lock)             _spin_lock_irq(lock)
#define spin_lock_bh(lock)              _spin_lock_bh(lock)
#define read_lock_irq(lock)             _read_lock_irq(lock)
#define read_lock_bh(lock)              _read_lock_bh(lock)
#define write_lock_irq(lock)            _write_lock_irq(lock)
#define write_lock_bh(lock)             _write_lock_bh(lock)
#define spin_unlock(lock)               _spin_unlock(lock)
#define read_unlock(lock)               _read_unlock(lock)
#define write_unlock(lock)              _write_unlock(lock)
#define spin_unlock_irq(lock)           _spin_unlock_irq(lock)
#define read_unlock_irq(lock)           _read_unlock_irq(lock)
#define write_unlock_irq(lock)          _write_unlock_irq(lock)

/*
 * The *_irqrestore variants again use typecheck() to insist that
 * "flags" is the unsigned long saved by the matching *_irqsave call.
 */
#define spin_unlock_irqrestore(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _spin_unlock_irqrestore(lock, flags);   \
        } while (0)
#define spin_unlock_bh(lock)            _spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags)             \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _read_unlock_irqrestore(lock, flags);   \
        } while (0)
#define read_unlock_bh(lock)            _read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags)            \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _write_unlock_irqrestore(lock, flags);  \
        } while (0)
#define write_unlock_bh(lock)           _write_unlock_bh(lock)
  291.  
#define spin_trylock_bh(lock)   __cond_lock(lock, _spin_trylock_bh(lock))

/*
 * Try to take the lock with local interrupts disabled.  Evaluates to 1
 * on success (interrupts stay disabled); on failure interrupts are
 * re-enabled and the whole statement expression evaluates to 0.
 */
#define spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

/*
 * Same, but saving the previous interrupt state in "flags"; on failure
 * that state is restored before evaluating to 0.
 */
#define spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

/* rwlock analogue of spin_trylock_irqsave() for the write side. */
#define write_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        write_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
  314.  
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <asm/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
/* Wrapped in __cond_lock: the lock is held only when this returns true. */
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 *
 * Only a snapshot: the answer may already be stale by the time the
 * caller acts on it.
 */
#define spin_can_lock(lock)     (!spin_is_locked(lock))
  337.  
  338. /*
  339.  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  340.  */
  341. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
  342. # include <linux/spinlock_api_smp.h>
  343. #else
  344. # include <linux/spinlock_api_up.h>
  345. #endif
  346.  
  347. #endif /* __LINUX_SPINLOCK_H */
  348.