# error "please don't include this file directly"
#endif
|
#include <asm/processor.h>	/* for cpu_relax() */
|
/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * NOTE(review): the middle of this header comment was replaced by a
 * diff hunk marker ("21,31 → 23,35") in a bad merge — restore the
 * missing text from the upstream file.
 */
|
#ifdef CONFIG_DEBUG_SPINLOCK
/*
 * In the debug case the values are inverted: slock == 1 means unlocked,
 * slock == 0 means locked (catches use of uninitialized locks).
 * A stale pre-rename __raw_spin_is_locked duplicate from a bad merge
 * has been dropped; arch_* is the current naming.
 */
#define arch_spin_is_locked(x)		((x)->slock == 0)
|
static inline void __raw_spin_lock(raw_spinlock_t *lock) |
static inline void arch_spin_lock(arch_spinlock_t *lock) |
{ |
lock->slock = 0; |
barrier(); |
} |
|
static inline void |
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
{ |
local_irq_save(flags); |
lock->slock = 0; |
barrier(); |
} |
|
static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
static inline int arch_spin_trylock(arch_spinlock_t *lock) |
{ |
char oldval = lock->slock; |
|
lock->slock = 0; |
barrier(); |
|
return oldval > 0; |
} |
|
static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
static inline void arch_spin_unlock(arch_spinlock_t *lock) |
{ |
barrier(); |
lock->slock = 1; |
} |
|
/*
 * Read-write spinlocks. No debug version.
 *
 * On UP these are no-ops apart from a compiler barrier; (void)(lock)
 * keeps the argument evaluated/used without warnings. The barrier-less
 * __raw_* duplicates left behind by a bad merge have been dropped.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)
|
#else /* DEBUG_SPINLOCK */
/*
 * Non-debug UP spinlocks: pure compiler barriers, no state at all.
 * arch_spin_is_locked() is always 0 — on UP nobody else can hold it.
 * (Stale __raw_* duplicates from a bad merge dropped.)
 */
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
|
/* A UP spinlock is never contended and rwlocks are always acquirable. */
#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#define arch_read_can_lock(lock)	(((void)(lock), 1))
#define arch_write_can_lock(lock)	(((void)(lock), 1))

/*
 * Spin (relaxing the CPU) until the lock is released. Only meaningful
 * in the CONFIG_DEBUG_SPINLOCK case, where slock carries real state.
 * (Stale __raw_* duplicates from a bad merge dropped.)
 */
#define arch_spin_unlock_wait(lock) \
		do { cpu_relax(); } while (arch_spin_is_locked(lock))
|
#endif /* __LINUX_SPINLOCK_UP_H */