Subversion Repositories Kolibri OS

Rev

Rev 4065 | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4065 Rev 5272
Line 3... Line 3...
3
 
3
 
4
#ifndef __LINUX_SPINLOCK_H
4
#ifndef __LINUX_SPINLOCK_H
5
# error "please don't include this file directly"
5
# error "please don't include this file directly"
Line -... Line 6...
-
 
6
#endif
-
 
7
 
6
#endif
8
#include <asm/processor.h>	/* for cpu_relax() */
7
 
9
 
8
/*
10
/*
9
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
11
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
10
 *
12
 *
Line 19... Line 21...
19
 * move potentially faulting instructions (notably user accesses)
21
 * move potentially faulting instructions (notably user accesses)
20
 * into the locked sequence, resulting in non-atomic execution.
22
 * into the locked sequence, resulting in non-atomic execution.
21
 */
23
 */
Line 22... Line 24...
22
 
24
 
23
#ifdef CONFIG_DEBUG_SPINLOCK
25
#ifdef CONFIG_DEBUG_SPINLOCK
Line 24... Line 26...
24
#define __raw_spin_is_locked(x)		((x)->slock == 0)
26
#define arch_spin_is_locked(x)		((x)->slock == 0)
25
 
27
 
26
static inline void __raw_spin_lock(raw_spinlock_t *lock)
28
static inline void arch_spin_lock(arch_spinlock_t *lock)
-
 
29
{
27
{
30
	lock->slock = 0;
Line 28... Line 31...
28
	lock->slock = 0;
31
	barrier();
29
}
32
}
30
 
33
 
31
static inline void
34
static inline void
32
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
35
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-
 
36
{
33
{
37
	local_irq_save(flags);
Line 34... Line 38...
34
	local_irq_save(flags);
38
	lock->slock = 0;
35
	lock->slock = 0;
39
	barrier();
36
}
40
}
Line 37... Line 41...
37
 
41
 
-
 
42
static inline int arch_spin_trylock(arch_spinlock_t *lock)
Line 38... Line 43...
38
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
43
{
39
{
44
	char oldval = lock->slock;
Line 40... Line 45...
40
	char oldval = lock->slock;
45
 
41
 
46
	lock->slock = 0;
-
 
47
	barrier();
42
	lock->slock = 0;
48
 
43
 
49
	return oldval > 0;
Line 44... Line 50...
44
	return oldval > 0;
50
}
45
}
51
 
46
 
52
static inline void arch_spin_unlock(arch_spinlock_t *lock)
47
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
53
{
48
{
54
	barrier();
49
	lock->slock = 1;
55
	lock->slock = 1;
50
}
56
}
51
 
57
 
52
/*
58
/*
Line 53... Line 59...
53
 * Read-write spinlocks. No debug version.
59
 * Read-write spinlocks. No debug version.
54
 */
60
 */
55
#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
61
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
56
#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
62
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
57
#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
63
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
58
#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
64
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
59
#define __raw_read_unlock(lock)		do { (void)(lock); } while (0)
65
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
60
#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
66
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)
Line 61... Line 67...
61
 
67
 
Line 62... Line 68...
62
#else /* DEBUG_SPINLOCK */
68
#else /* DEBUG_SPINLOCK */
63
#define __raw_spin_is_locked(lock)	((void)(lock), 0)
69
#define arch_spin_is_locked(lock)	((void)(lock), 0)
Line 64... Line 70...
64
/* for sched.c and kernel_lock.c: */
70
/* for sched/core.c and kernel_lock.c: */
65
# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
71
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
Line 66... Line 72...
66
# define __raw_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
72
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)