Subversion Repositories Kolibri OS

Rev

Rev 4065 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
1408 serge 1
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

/* This header is only valid when pulled in via <linux/spinlock.h>. */
#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif
7
 
5272 serge 8
#include 	/* for cpu_relax() */
9
 
/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */
 
25
#ifdef CONFIG_DEBUG_SPINLOCK
/* Debug encoding is inverted: slock == 0 means the lock is held. */
#define arch_spin_is_locked(x)		((x)->slock == 0)
1408 serge 27
 
5272 serge 28
/*
 * "Acquire" the lock.  On UP there is no contention, so this only
 * records the held state and emits a compiler barrier so the critical
 * section cannot be hoisted above the lock point.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;	/* 0 == locked in the debug encoding */
	barrier();		/* keep the critical section after this point */
}
33
 
34
/*
 * Lock and disable local interrupts.
 *
 * NOTE(review): `flags` arrives by value, so local_irq_save() here
 * writes only the local copy — presumably the spin_lock_irqsave()
 * macro layer above captures the flags itself; confirm at the callers.
 */
static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	local_irq_save(flags);
	lock->slock = 0;	/* 0 == locked in the debug encoding */
	barrier();		/* keep the critical section after this point */
}
41
 
5272 serge 42
/*
 * Try to take the lock.  On UP this always ends up holding the lock;
 * the return value reports whether it was free beforehand (debug
 * encoding: slock == 0 means held).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char was_free = lock->slock;	/* nonzero iff the lock was free */

	lock->slock = 0;		/* mark it held */
	barrier();			/* fence off the critical section */

	return was_free > 0;
}
51
 
5272 serge 52
/*
 * Release the lock: barrier first so nothing from the critical section
 * can sink below the release, then restore the unlocked state.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();		/* keep the critical section before this point */
	lock->slock = 1;	/* 1 == unlocked in the debug encoding */
}
57
 
58
/*
 * Read-write spinlocks. No debug version.
 *
 * On UP these are pure no-ops: each evaluates (and discards) the lock
 * argument and emits a compiler barrier; the trylock variants always
 * "succeed" (evaluate to 1).
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)
1408 serge 67
 
68
#else /* DEBUG_SPINLOCK */
/* Non-debug UP build: spinlocks compile down to compiler barriers. */
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
76
 
5272 serge 77
/* A UP spinlock can never be contended, and rwlocks can always be taken. */
#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#define arch_read_can_lock(lock)	(((void)(lock), 1))
#define arch_write_can_lock(lock)	(((void)(lock), 1))
1408 serge 81
 
5272 serge 82
/*
 * Spin (with a CPU relax hint) until the lock is released.  With
 * CONFIG_DEBUG_SPINLOCK unset, arch_spin_is_locked() is constant 0 and
 * this reduces to a single cpu_relax().
 */
#define arch_spin_unlock_wait(lock) \
		do { cpu_relax(); } while (arch_spin_is_locked(lock))

#endif /* __LINUX_SPINLOCK_UP_H */