Rev 6082 | serge
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
//#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
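
/*
 * Illustrative walk-through (not in the original header), assuming the
 * generic value PREEMPT_NEED_RESCHED == 0x80000000 from <linux/preempt.h>:
 *
 *   PREEMPT_ENABLED        0x80000000   count 0, inverted bit set: no
 *                                       resched pending, may preempt
 *   preempt_disable()   -> 0x80000001   may not preempt
 *   resched requested   -> 0x00000001   set_preempt_need_resched()
 *                                       clears the inverted bit
 *   preempt_enable()    -> 0x00000000   decrement hits 0: can and
 *                                       should reschedule
 */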

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}
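
/*
 * Illustrative sketch (not in the original header): without the mask,
 * generic tests such as
 *
 *   #define in_atomic() (preempt_count() != 0)
 *
 * would always see the inverted NEED_RESCHED bit (normally set) and
 * conclude preemption is disabled. Masking it out keeps a plain
 * zero/non-zero check meaningful.
 */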

static __always_inline void preempt_count_set(int pc)
{
	raw_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}
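
/*
 * Note the inversion above: "set" clears the bit and "clear" sets it,
 * because the bit stores the *negation* of NEED_RESCHED. The resulting
 * truth table (illustrative, not in the original header):
 *
 *   bit set   -> no resched needed -> test_preempt_need_resched() == false
 *   bit clear -> resched needed    -> test_preempt_need_resched() == true
 */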

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(__preempt_count, -val);
}
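
/*
 * Sketch of how the generic layer builds on these primitives (simplified
 * from include/linux/preempt.h; debug variants omitted):
 *
 *   #define preempt_disable() \
 *           do { __preempt_count_add(1); barrier(); } while (0)
 */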

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to
 * reschedule, a decrement which hits zero means we have no preempt_count and
 * should reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}
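
/*
 * Sketch of the caller this enables (simplified from
 * include/linux/preempt.h):
 *
 *   #define preempt_enable() \
 *           do { \
 *                   barrier(); \
 *                   if (unlikely(preempt_count_dec_and_test())) \
 *                           __preempt_schedule(); \
 *           } while (0)
 *
 * GEN_UNARY_RMWcc() lets this compile down to a single decrement of the
 * per-cpu counter whose zero flag feeds the branch directly.
 */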

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
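
/*
 * Sketch of a caller (simplified from kernel/sched/core.c):
 *
 *   int __sched _cond_resched(void)
 *   {
 *           if (should_resched(0)) {
 *                   preempt_schedule_common();
 *                   return 1;
 *           }
 *           return 0;
 *   }
 *
 * The count compares equal to preempt_offset only when the inverted
 * NEED_RESCHED bit is clear and no extra nesting is held, so one load
 * and compare answers "need resched AND may resched".
 */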

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
  extern asmlinkage void preempt_schedule(void);
  extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
  extern asmlinkage void preempt_schedule_notrace(void);
#endif
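
/*
 * Why a bare asm("call ...") rather than calling preempt_schedule()
 * directly: the ___preempt_schedule thunk saves and restores the
 * caller-clobbered registers around the real call (see the x86 thunk
 * assembly), so preempt_enable() sites do not force the compiler to
 * spill live registers for a full C function call.
 */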

#endif /* __ASM_PREEMPT_H */