Subversion Repositories Kolibri OS

Diff: Rev 5272 → Rev 6082 (lines marked "-" appear only in Rev 5272, lines marked "+" only in Rev 6082; unmarked lines are common to both revisions)

#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
//#include 

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	raw_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
-#define init_task_preempt_count(p) do { \
-	task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
-} while (0)
+#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
-	task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}
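
A note on the helpers above: the PREEMPT_NEED_RESCHED bit is stored inverted (set means no reschedule is wanted), which is why preempt_count() masks it off and test_preempt_need_resched() tests for the bit being clear. The following user-space code is a minimal sketch of that behaviour only; it assumes PREEMPT_NEED_RESCHED is the top bit (as in upstream include/linux/preempt.h) and replaces the per-CPU variable with a plain unsigned int.

#include <assert.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* assumption: top bit, as upstream */
#define PREEMPT_ENABLED		(0 + PREEMPT_NEED_RESCHED)

static unsigned int cnt = PREEMPT_ENABLED;	/* stands in for the per-CPU __preempt_count */

static unsigned int model_preempt_count(void)	{ return cnt & ~PREEMPT_NEED_RESCHED; }
static void model_set_need_resched(void)	{ cnt &= ~PREEMPT_NEED_RESCHED; }	/* clear the inverted bit */
static void model_clear_need_resched(void)	{ cnt |=  PREEMPT_NEED_RESCHED; }	/* set the inverted bit back */
static int  model_test_need_resched(void)	{ return !(cnt & PREEMPT_NEED_RESCHED); }

int main(void)
{
	assert(model_preempt_count() == 0);	/* preemptible: masked count reads 0 */
	assert(!model_test_need_resched());	/* bit set: no reschedule pending */

	model_set_need_resched();		/* a wakeup wants a reschedule */
	assert(model_test_need_resched());	/* bit cleared: reschedule pending */
	assert(cnt == 0);			/* count 0 *and* bit clear: whole word is 0 */

	model_clear_need_resched();
	return 0;
}

The last assertion is the point of the inversion: "no preempt count" and "reschedule wanted" collapse into the single condition "the word is zero".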

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
-	return unlikely(!raw_cpu_read_4(__preempt_count));
+	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
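
The Rev 6082 change above replaces the hard-coded "count must be zero" test with a comparison against a caller-supplied preempt_offset. Because the NEED_RESCHED bit is stored inverted, the raw word equals a small offset only when a reschedule is pending and nothing beyond that offset holds preemption off. Below is a user-space sketch of why that matters for a caller that legitimately holds one level of preempt count; the PREEMPT_LOCK_OFFSET value and the model are illustrative assumptions, not definitions from this header.

#include <assert.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* assumption: top bit, as upstream */
#define PREEMPT_LOCK_OFFSET	1		/* assumption: one held spinlock's worth of count */

static unsigned int cnt;			/* stands in for the per-CPU __preempt_count */

static int model_should_resched(unsigned int preempt_offset)
{
	return cnt == preempt_offset;		/* Rev 6082 semantics */
}

int main(void)
{
	/* reschedule pending (inverted bit clear) while one lock is held */
	cnt = PREEMPT_LOCK_OFFSET;
	assert(!model_should_resched(0));			/* a zero-only test stays false here */
	assert(model_should_resched(PREEMPT_LOCK_OFFSET));	/* the offset-aware test fires */

	/* no reschedule pending: the set bit keeps the comparison false */
	cnt = PREEMPT_NEED_RESCHED | PREEMPT_LOCK_OFFSET;
	assert(!model_should_resched(PREEMPT_LOCK_OFFSET));
	return 0;
}

A cond_resched_lock()-style helper can then pass its own offset and still learn that dropping the lock would allow a reschedule.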

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
  extern asmlinkage void preempt_schedule(void);
-# ifdef CONFIG_CONTEXT_TRACKING
-    extern asmlinkage void ___preempt_schedule_context(void);
-#   define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
-    extern asmlinkage void preempt_schedule_context(void);
-# endif
+  extern asmlinkage void ___preempt_schedule_notrace(void);
+# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
+  extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */
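
Finally, a sketch of the property the comments describe: because the bit is inverted, preempt_enable() only needs one decrement whose result hits zero exactly when the count has dropped to zero and a reschedule is pending, which is what GEN_UNARY_RMWcc's "decl" with the "e" (zero) condition expresses above. This is a user-space model under the same assumptions as the earlier sketches (top-bit PREEMPT_NEED_RESCHED, a plain unsigned int instead of the per-CPU variable); the printf stands in for calling preempt_schedule().

#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* assumption: top bit, as upstream */
#define PREEMPT_ENABLED		(0 + PREEMPT_NEED_RESCHED)

static unsigned int cnt = PREEMPT_ENABLED;	/* stands in for the per-CPU __preempt_count */

static void model_preempt_disable(void)
{
	cnt += 1;				/* models __preempt_count_add(1) */
}

static void model_preempt_enable(void)
{
	/* single decrement-and-test: zero means "no preempt count left"
	 * and "NEED_RESCHED pending" at the same time */
	if (--cnt == 0)
		printf("reschedule now (preempt_schedule())\n");
}

int main(void)
{
	model_preempt_disable();
	model_preempt_enable();			/* bit still set: nothing to do */

	model_preempt_disable();
	cnt &= ~PREEMPT_NEED_RESCHED;		/* models set_preempt_need_resched() */
	model_preempt_enable();			/* decrement hits 0: would reschedule */
	return 0;
}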