#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                               struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * can not be reordered with a LOAD inside this section.
 * spin_lock() is the one-way barrier; this LOAD can not escape out
 * of the region. So the default implementation simply ensures that
 * a STORE can not move into the critical section; smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()       smp_wmb()
#endif
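
/*
 * Example (hypothetical, illustrative only): the ordering discussed above.
 * Without smp_mb__before_spinlock(), the STORE to wake_flag could be
 * reordered with the LOAD of cond performed inside the critical section.
 * All identifiers below are made up for this sketch and are not part of
 * the kernel API.
 */
#if 0
static DEFINE_RAW_SPINLOCK(wait_lock);          /* hypothetical lock */
static int wake_flag;                           /* hypothetical data */
static int cond;

static void waker_sketch(void)
{
        wake_flag = 1;                          /* STORE before the critical section */
        smp_mb__before_spinlock();
        raw_spin_lock(&wait_lock);
        if (cond)                               /* LOAD inside the critical section */
                wake_flag = 2;                  /* act on the observed condition */
        raw_spin_unlock(&wait_lock);
}
#endif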

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock()     do { } while (0)
#endif
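
/*
 * Example (hypothetical, illustrative only): the UNLOCK+LOCK pairing
 * described above.  With the barrier placed after the second lock
 * acquisition, the store done before the unlock and the store done after
 * the lock are ordered as if a full memory barrier sat between them.
 * All identifiers below are made up for this sketch.
 */
#if 0
static DEFINE_RAW_SPINLOCK(lock_a);             /* hypothetical locks */
static DEFINE_RAW_SPINLOCK(lock_b);
static int x, y;                                /* hypothetical data */

static void unlock_lock_sketch(void)
{
        raw_spin_lock(&lock_a);
        x = 1;                                  /* store before the UNLOCK */
        raw_spin_unlock(&lock_a);

        raw_spin_lock(&lock_b);
        smp_mb__after_unlock_lock();            /* upgrade UNLOCK+LOCK to a full barrier */
        y = 1;                                  /* store after the LOCK */
        raw_spin_unlock(&lock_b);
}
#endif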

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set. The
 * various methods are defined as nops when they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif
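
/*
 * Example (hypothetical, illustrative only): why the (void)(subclass)
 * evaluation above matters.  With CONFIG_DEBUG_LOCK_ALLOC=n, a caller
 * like this would otherwise trigger a "set but not used" warning for
 * 'nest' under W=1.  The function and variable names are made up.
 */
#if 0
static void nested_lock_sketch(raw_spinlock_t *outer)
{
        int nest = SINGLE_DEPTH_NESTING;        /* consumed only via (void)(subclass) */

        raw_spin_lock_nested(outer, nest);
        /* ... critical section ... */
        raw_spin_unlock(outer);
}
#endif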

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)      \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)         \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

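/*
 * Example (hypothetical, illustrative only): the save/restore pairing
 * implemented by raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore()
 * above.  'flags' must be a plain unsigned long, or the typecheck() in
 * the macros complains at build time.  The identifiers are made up.
 */
#if 0
static DEFINE_RAW_SPINLOCK(dev_lock);           /* hypothetical lock */
static unsigned int dev_pending;                /* hypothetical data */

static void irqsave_sketch(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&dev_lock, flags);
        dev_pending++;          /* also safe against the local interrupt handler */
        raw_spin_unlock_irqrestore(&dev_lock, flags);
}
#endif
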
/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)

static inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({ \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

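/*
 * Example (hypothetical, illustrative only): typical use of the spin_*()
 * wrappers above.  spinlock_t embeds a raw_spinlock_t (see
 * spinlock_check()), so every call below ends up in the corresponding
 * raw_spin_*() routine.  The structure and functions are made up.
 */
#if 0
struct example_dev {                            /* hypothetical structure */
        spinlock_t lock;
        unsigned int pending;
};

static void example_dev_init(struct example_dev *dev)
{
        spin_lock_init(&dev->lock);
        dev->pending = 0;
}

static void example_dev_kick(struct example_dev *dev)
{
        spin_lock(&dev->lock);
        dev->pending++;
        spin_unlock(&dev->lock);
}
#endif
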
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

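/*
 * Example (hypothetical, illustrative only): the classic "drop a
 * reference, and take the lock only when the count hits zero" pattern
 * that atomic_dec_and_lock() implements for object teardown.  The
 * structure and function below are made up.
 */
#if 0
struct example_obj {                            /* hypothetical object */
        atomic_t refcount;
        spinlock_t *list_lock;
};

static void example_obj_put(struct example_obj *obj)
{
        /*
         * Returns true only when the count dropped to zero, with
         * *list_lock held; all other callers see false and never
         * touch the lock.
         */
        if (atomic_dec_and_lock(&obj->refcount, obj->list_lock)) {
                /* last reference: tear the object down under the lock */
                spin_unlock(obj->list_lock);
        }
}
#endif
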
#endif /* __LINUX_SPINLOCK_H */