#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name, it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * cannot be reordered with LOADs and STOREs inside this section.
 * spin_lock() is the one-way barrier: a LOAD cannot escape out
 * of the region. So the default implementation simply ensures that
 * a STORE cannot move into the critical section; smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()       smp_wmb()
#endif

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif
/*
 * Define the various spin_lock methods.  Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set.  The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
        _raw_spin_lock_bh_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument, so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass)        _raw_spin_lock_bh(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)                      \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)         \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)
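
/*
 * Usage sketch (illustrative only, not authoritative; my_lock, struct foo
 * and obj are hypothetical names): a lock with static storage duration can
 * be initialized at compile time with DEFINE_SPINLOCK() from
 * linux/spinlock_types.h, while a lock embedded in a dynamically allocated
 * object must go through spin_lock_init() before first use:
 *
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      struct foo *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *      if (obj)
 *              spin_lock_init(&obj->lock);
 */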

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}
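
/*
 * Usage sketch (illustrative only; obj and its fields are hypothetical):
 * spin_trylock() returns nonzero when the lock was acquired, so the lock
 * must only be released on the success path:
 *
 *      if (spin_trylock(&obj->lock)) {
 *              obj->counter++;
 *              spin_unlock(&obj->lock);
 *      } else {
 *              pr_debug("lock busy, deferring update\n");
 *      }
 */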

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_bh_nested(lock, subclass)                     \
do {                                                            \
        raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)
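
/*
 * Usage sketch (illustrative only; src and dst are hypothetical): when two
 * locks of the same lock class are held at once, e.g. two instances of the
 * same structure, the inner acquisition is annotated with a subclass so
 * that lockdep does not report a false deadlock.  SINGLE_DEPTH_NESTING is
 * provided by linux/lockdep.h:
 *
 *      spin_lock(&dst->lock);
 *      spin_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
 *      ...
 *      spin_unlock(&src->lock);
 *      spin_unlock(&dst->lock);
 */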

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
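
/*
 * Usage sketch (illustrative only; dev and its fields are hypothetical):
 * the irqsave/irqrestore pair is the usual choice when a lock may be taken
 * both from process context and from an interrupt handler; flags must be
 * an unsigned long in the caller's frame:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&dev->lock, flags);
 *      dev->pending++;
 *      spin_unlock_irqrestore(&dev->lock, flags);
 */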

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}

static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
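
/*
 * Usage sketch (illustrative only; obj, obj_list_lock and the refcount
 * field are hypothetical): the typical caller drops a reference and, only
 * when the count reaches zero, ends up holding the lock that protects the
 * object's removal from a shared list:
 *
 *      if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *              list_del(&obj->node);
 *              spin_unlock(&obj_list_lock);
 *              kfree(obj);
 *      }
 */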

#endif /* __LINUX_SPINLOCK_H */