/* linux/wait.h — wait queue types and helpers (port of the Linux API onto native events) */
1 | #ifndef _LINUX_WAIT_H |
1 | #ifndef _LINUX_WAIT_H |
2 | #define _LINUX_WAIT_H |
2 | #define _LINUX_WAIT_H |
3 | /* |
3 | /* |
4 | * Linux wait queue related types and methods |
4 | * Linux wait queue related types and methods |
5 | */ |
5 | */ |
6 | #include |
6 | #include |
7 | #include |
7 | #include |
8 | #include |
8 | #include |
9 | #include |
9 | #include |
10 | - | ||
11 | - | ||
12 | - | ||
13 | #include |
10 | #include |
14 | 11 | ||
/* A waiter entry and the callback type invoked to wake it. */
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
18 | 15 | ||
- | 16 | /* __wait_queue::flags */ |
|
- | 17 | #define WQ_FLAG_EXCLUSIVE 0x01 |
|
19 | typedef struct __wait_queue_head wait_queue_head_t; |
18 | #define WQ_FLAG_WOKEN 0x02 |
20 | 19 | ||
- | 20 | struct __wait_queue { |
|
21 | struct __wait_queue |
21 | unsigned int flags; |
22 | { |
22 | void *private; |
23 | wait_queue_func_t func; |
23 | wait_queue_func_t func; |
24 | struct list_head task_list; |
24 | struct list_head task_list; |
25 | evhandle_t evnt; |
25 | evhandle_t evnt; |
26 | }; |
26 | }; |
- | 27 | ||
- | 28 | struct wait_bit_key { |
|
- | 29 | void *flags; |
|
- | 30 | int bit_nr; |
|
- | 31 | #define WAIT_ATOMIC_T_BIT_NR -1 |
|
- | 32 | unsigned long timeout; |
|
- | 33 | }; |
|
27 | 34 | ||
- | 35 | struct wait_bit_queue { |
|
- | 36 | struct wait_bit_key key; |
|
- | 37 | wait_queue_t wait; |
|
28 | struct __wait_queue_head |
38 | }; |
- | 39 | ||
29 | { |
40 | struct __wait_queue_head { |
30 | spinlock_t lock; |
41 | spinlock_t lock; |
31 | struct list_head task_list; |
42 | struct list_head task_list; |
32 | }; |
43 | }; |
- | 44 | typedef struct __wait_queue_head wait_queue_head_t; |
|
struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
    .private    = tsk,                                                  \
    .func       = default_wake_function,                                \
    .task_list  = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)                                    \
    wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
    .lock       = __SPIN_LOCK_UNLOCKED(name.lock),                      \
    .task_list  = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name)                                   \
    wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
    { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
    { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
- | 73 | extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); |
|
- | 74 | ||
- | 75 | #ifdef CONFIG_LOCKDEP |
|
- | 76 | # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ |
|
- | 77 | ({ init_waitqueue_head(&name); name; }) |
|
- | 78 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ |
|
- | 79 | wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) |
|
- | 80 | #else |
|
- | 81 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) |
|
- | 82 | #endif |
|
- | 83 | ||
- | 84 | static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) |
|
- | 85 | { |
|
- | 86 | q->flags = 0; |
|
- | 87 | q->private = p; |
|
- | 88 | q->func = default_wake_function; |
|
- | 89 | } |
|
- | 90 | ||
- | 91 | static inline void |
|
- | 92 | init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) |
|
- | 93 | { |
|
- | 94 | q->flags = 0; |
|
- | 95 | q->private = NULL; |
|
- | 96 | q->func = func; |
|
- | 97 | } |
|
- | 98 | ||
33 | static inline int waitqueue_active(wait_queue_head_t *q) |
99 | static inline int waitqueue_active(wait_queue_head_t *q) |
34 | { |
100 | { |
35 | return !list_empty(&q->task_list); |
101 | return !list_empty(&q->task_list); |
36 | } |
102 | } |
37 | 103 | ||
38 | extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
104 | extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
39 | extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); |
105 | extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); |
40 | extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
106 | extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
41 | 107 | ||
42 | static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) |
108 | static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) |
43 | { |
109 | { |
44 | list_add(&new->task_list, &head->task_list); |
110 | list_add(&new->task_list, &head->task_list); |
45 | } |
111 | } |
46 | 112 | ||
47 | /* |
113 | /* |
- | 114 | * Used for wake-one threads: |
|
- | 115 | */ |
|
- | 116 | static inline void |
|
- | 117 | __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) |
|
- | 118 | { |
|
- | 119 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
|
- | 120 | __add_wait_queue(q, wait); |
|
- | 121 | } |
|
- | 122 | ||
- | 123 | static inline void __add_wait_queue_tail(wait_queue_head_t *head, |
|
- | 124 | wait_queue_t *new) |
|
- | 125 | { |
|
- | 126 | list_add_tail(&new->task_list, &head->task_list); |
|
- | 127 | } |
|
- | 128 | ||
- | 129 | static inline void |
|
- | 130 | __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait) |
|
- | 131 | { |
|
- | 132 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
|
- | 133 | __add_wait_queue_tail(q, wait); |
|
- | 134 | } |
|
- | 135 | ||
- | 136 | static inline void |
|
- | 137 | __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) |
|
- | 138 | { |
|
- | 139 | list_del(&old->task_list); |
|
- | 140 | } |
|
- | 141 | ||
- | 142 | typedef int wait_bit_action_f(struct wait_bit_key *, int mode); |
|
- | 143 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
|
- | 144 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
|
- | 145 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
|
- | 146 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); |
|
- | 147 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); |
|
- | 148 | void __wake_up_bit(wait_queue_head_t *, void *, int); |
|
- | 149 | int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); |
|
- | 150 | int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); |
|
- | 151 | void wake_up_bit(void *, int); |
|
- | 152 | void wake_up_atomic_t(atomic_t *); |
|
- | 153 | int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned); |
|
- | 154 | int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long); |
|
- | 155 | int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned); |
|
- | 156 | int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); |
|
- | 157 | wait_queue_head_t *bit_waitqueue(void *, int); |
|
/*
 * Linux's scheduler-based __wait_event() is intentionally disabled in this
 * port; the native-event wait_event() below replaces it.  Kept for reference.

#define __wait_event(wq, condition)                                     \
do {                                                                    \
    DEFINE_WAIT(__wait);                                                \
                                                                        \
    for (;;) {                                                          \
        prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);            \
        if (condition)                                                  \
            break;                                                      \
        schedule();                                                     \
    }                                                                   \
    finish_wait(&wq, &__wait);                                          \
} while (0)

*/
62 | 175 | ||
/*
 * Block until @condition becomes true, waking on the waiter's native event
 * with @timeout per wait round.
 *
 * NOTE(review): __ret is initialised to @timeout and never updated, so the
 * result does not reflect elapsed/remaining time the way Linux's
 * wait_event_timeout() does — confirm callers only test it for non-zero.
 */
#define wait_event_timeout(wq, condition, timeout)                      \
({                                                                      \
    long __ret = timeout;                                               \
    do {                                                                \
        wait_queue_t __wait = {                                         \
            .task_list = LIST_HEAD_INIT(__wait.task_list),              \
            .evnt      = CreateEvent(NULL, MANUAL_DESTROY),             \
        };                                                              \
        unsigned long flags;                                            \
                                                                        \
        spin_lock_irqsave(&wq.lock, flags);                             \
        if (list_empty(&__wait.task_list))                              \
            __add_wait_queue(&wq, &__wait);                             \
        spin_unlock_irqrestore(&wq.lock, flags);                        \
                                                                        \
        for (;;) {                                                      \
            if (condition)                                              \
                break;                                                  \
            WaitEventTimeout(__wait.evnt, timeout);                     \
        }                                                               \
        if (!list_empty(&__wait.task_list)) {                           \
            spin_lock_irqsave(&wq.lock, flags);                         \
            list_del_init(&__wait.task_list);                           \
            spin_unlock_irqrestore(&wq.lock, flags);                    \
        }                                                               \
        DestroyEvent(__wait.evnt);                                      \
    } while (0);                                                        \
    __ret;                                                              \
})
92 | 205 | ||
/* No signal delivery in this port: interruptible waits alias the plain ones. */
#define wait_event_interruptible_timeout(wq, condition, timeout)        \
    wait_event_timeout(wq, condition, timeout)
95 | 208 | ||
96 | 209 | ||
/*
 * Block until @condition becomes true.  A temporary waiter with a native
 * event is queued on @wq; wake_up*() raises the event to re-test @condition.
 */
#define wait_event(wq, condition)                                       \
do {                                                                    \
    wait_queue_t __wait = {                                             \
        .task_list = LIST_HEAD_INIT(__wait.task_list),                  \
        .evnt      = CreateEvent(NULL, MANUAL_DESTROY),                 \
    };                                                                  \
    unsigned long flags;                                                \
                                                                        \
    spin_lock_irqsave(&wq.lock, flags);                                 \
    if (list_empty(&__wait.task_list))                                  \
        __add_wait_queue(&wq, &__wait);                                 \
    spin_unlock_irqrestore(&wq.lock, flags);                            \
                                                                        \
    for (;;) {                                                          \
        if (condition)                                                  \
            break;                                                      \
        WaitEvent(__wait.evnt);                                         \
    }                                                                   \
    if (!list_empty_careful(&__wait.task_list)) {                       \
        spin_lock_irqsave(&wq.lock, flags);                             \
        list_del_init(&__wait.task_list);                               \
        spin_unlock_irqrestore(&wq.lock, flags);                        \
    }                                                                   \
    DestroyEvent(__wait.evnt);                                          \
} while (0)
122 | 235 | ||
/* Interruptible variant: no signals here, so it always "succeeds" with 0. */
#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
    int __ret = 0;                                                      \
    if (!(condition))                                                   \
        wait_event(wq, condition);                                      \
    __ret;                                                              \
})
- | 243 | ||
- | 244 | static inline |
|
- | 245 | void wake_up(wait_queue_head_t *q) |
|
- | 246 | { |
|
- | 247 | wait_queue_t *curr; |
|
- | 248 | unsigned long flags; |
|
- | 249 | ||
- | 250 | spin_lock_irqsave(&q->lock, flags); |
|
- | 251 | curr = list_first_entry(&q->task_list, typeof(*curr), task_list); |
|
- | 252 | { |
|
- | 253 | // printf("raise event \n"); |
|
- | 254 | kevent_t event; |
|
- | 255 | event.code = -1; |
|
- | 256 | RaiseEvent(curr->evnt, 0, &event); |
|
- | 257 | } |
|
- | 258 | spin_unlock_irqrestore(&q->lock, flags); |
|
- | 259 | } |
|
- | 260 | ||
- | 261 | static inline |
|
- | 262 | void wake_up_interruptible(wait_queue_head_t *q) |
|
- | 263 | { |
|
- | 264 | wait_queue_t *curr; |
|
- | 265 | unsigned long flags; |
|
- | 266 | ||
- | 267 | spin_lock_irqsave(&q->lock, flags); |
|
- | 268 | curr = list_first_entry(&q->task_list, typeof(*curr), task_list); |
|
- | 269 | { |
|
- | 270 | // printf("raise event \n"); |
|
- | 271 | kevent_t event; |
|
- | 272 | event.code = -1; |
|
- | 273 | RaiseEvent(curr->evnt, 0, &event); |
|
- | 274 | } |
|
- | 275 | spin_unlock_irqrestore(&q->lock, flags); |
|
130 | 276 | } |
|
131 | 277 | ||
132 | static inline |
278 | static inline |
133 | void wake_up_all(wait_queue_head_t *q) |
279 | void wake_up_all(wait_queue_head_t *q) |
134 | { |
280 | { |
135 | wait_queue_t *curr; |
281 | wait_queue_t *curr; |
136 | unsigned long flags; |
282 | unsigned long flags; |
137 | 283 | ||
138 | spin_lock_irqsave(&q->lock, flags); |
284 | spin_lock_irqsave(&q->lock, flags); |
139 | list_for_each_entry(curr, &q->task_list, task_list) |
285 | list_for_each_entry(curr, &q->task_list, task_list) |
140 | { |
286 | { |
141 | // printf("raise event \n"); |
287 | // printf("raise event \n"); |
142 | - | ||
143 | kevent_t event; |
288 | kevent_t event; |
144 | event.code = -1; |
289 | event.code = -1; |
145 | RaiseEvent(curr->evnt, 0, &event); |
290 | RaiseEvent(curr->evnt, 0, &event); |
146 | } |
291 | } |
147 | spin_unlock_irqrestore(&q->lock, flags); |
292 | spin_unlock_irqrestore(&q->lock, flags); |
148 | } |
293 | } |
149 | 294 | ||
150 | 295 | ||
151 | static inline void |
296 | static inline void |
152 | init_waitqueue_head(wait_queue_head_t *q) |
297 | init_waitqueue_head(wait_queue_head_t *q) |
153 | { |
298 | { |
154 | spin_lock_init(&q->lock); |
299 | spin_lock_init(&q->lock); |
155 | INIT_LIST_HEAD(&q->task_list); |
300 | INIT_LIST_HEAD(&q->task_list); |
156 | }; |
301 | }; |
157 | 302 | ||
158 | 303 | ||
159 | //struct completion { |
304 | //struct completion { |
160 | // unsigned int done; |
305 | // unsigned int done; |
161 | // wait_queue_head_t wait; |
306 | // wait_queue_head_t wait; |
162 | //}; |
307 | //}; |
163 | 308 | ||
164 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
309 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
165 | 310 | ||
166 | 311 | ||
167 | #define DEFINE_WAIT_FUNC(name, function) \ |
312 | #define DEFINE_WAIT_FUNC(name, function) \ |
168 | wait_queue_t name = { \ |
313 | wait_queue_t name = { \ |
169 | .func = function, \ |
314 | .func = function, \ |
170 | .task_list = LIST_HEAD_INIT((name).task_list), \ |
315 | .task_list = LIST_HEAD_INIT((name).task_list), \ |
171 | .evnt = CreateEvent(NULL, MANUAL_DESTROY), \ |
316 | .evnt = CreateEvent(NULL, MANUAL_DESTROY), \ |
172 | } |
317 | } |
173 | 318 | ||
174 | #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) |
319 | #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) |
175 | 320 | ||
176 | 321 | ||
177 | #endif |
322 | #endif |