Subversion Repositories Kolibri OS

Rev

Rev 3031 | Rev 3297 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2967 Serge 1
#ifndef _LINUX_WAIT_H
2
#define _LINUX_WAIT_H
3
 
#include <syscall.h>  /* NOTE(review): include target lost in extraction (angle brackets
                         stripped); KolibriOS wait.h at this revision pulled in the
                         syscall/event primitives (CreateEvent, WaitEvent, ...) — verify */
2967 Serge 6
/* Minimal Linux-compatible wait-queue types for the KolibriOS port.
 * Each waiter parks on its own kernel event handle (evnt) instead of a
 * task state; wake_up_all() below raises that event to release it. */
typedef struct __wait_queue wait_queue_t;
typedef struct __wait_queue_head wait_queue_head_t;

/* One waiter, linked into a wait_queue_head's task_list. */
struct __wait_queue
{
    struct list_head task_list;   /* link in __wait_queue_head.task_list */
    evhandle_t evnt;              /* private event raised by wake_up_all() */
};

/* Head of a wait queue: a spinlock-protected list of waiters. */
struct __wait_queue_head
{
    spinlock_t lock;              /* guards task_list (taken irqsave by users) */
    struct list_head task_list;   /* list of struct __wait_queue waiters */
};
20
 
21
/*
 * __add_wait_queue() - link a waiter onto a wait-queue head.
 * @head: queue to add to; callers in this file hold head->lock around
 *        this call (see the wait_event* macros below).
 * @wait: waiter to insert at the front of head->task_list.
 *
 * Parameter renamed from "new": "new" is a C++ keyword and breaks
 * inclusion of this header from C++ translation units; C callers are
 * unaffected since the parameter is positional.
 */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *wait)
{
    list_add(&wait->task_list, &head->task_list);
}
25
 
26
 
27
/*
 * __wait_event() - classic Linux scheduler-based sleep loop.
 *
 * NOTE(review): relies on DEFINE_WAIT/prepare_to_wait/finish_wait and
 * schedule(), none of which are defined in this header; the event-based
 * wait_event()/wait_event_timeout() macros below are the ones this port
 * actually provides. Presumably retained for reference — confirm there
 * are no users before relying on it.
 */
#define __wait_event(wq, condition)                                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
39
 
40
 
3031 serge 41
 
42
 
43
/*
 * wait_event_timeout() - block until @condition becomes true.
 *
 * Creates a private KolibriOS event, links a waiter onto @wq under
 * wq.lock, then loops re-testing @condition and sleeping on the event;
 * once the condition holds the waiter is unlinked (under wq.lock) and
 * its event destroyed.
 *
 * NOTE(review): @timeout is captured into __ret and returned unchanged,
 * but no timed wait is performed — WaitEvent() blocks until the event
 * is raised, so this macro can wait forever and always reports
 * "condition met with full timeout remaining". Confirm whether a timed
 * primitive should be used here.
 */
#define wait_event_timeout(wq, condition, timeout)          \
({                                                          \
    long __ret = timeout;                                   \
do{                                                         \
    wait_queue_t __wait = {                                 \
        .task_list = LIST_HEAD_INIT(__wait.task_list),      \
        .evnt      = CreateEvent(NULL, MANUAL_DESTROY),     \
    };                                                      \
    u32  flags;                                             \
                                                            \
    spin_lock_irqsave(&wq.lock, flags);                     \
    if (list_empty(&__wait.task_list))                      \
        __add_wait_queue(&wq, &__wait);                     \
    spin_unlock_irqrestore(&wq.lock, flags);                \
                                                            \
    for(;;){                                                \
        if (condition)                                      \
            break;                                          \
        WaitEvent(__wait.evnt);                             \
    };                                                      \
    if (!list_empty_careful(&__wait.task_list)) {           \
        spin_lock_irqsave(&wq.lock, flags);                 \
        list_del_init(&__wait.task_list);                   \
        spin_unlock_irqrestore(&wq.lock, flags);            \
    };                                                      \
    DestroyEvent(__wait.evnt);                              \
} while (0);                                                \
    __ret;                                                  \
})
72
 
3192 Serge 73
/* NOTE(review): maps directly onto wait_event_timeout() — interruption
 * semantics are not implemented in this port, so callers expecting
 * -ERESTARTSYS on a signal will never see it; it also inherits the
 * ignored-timeout behavior of wait_event_timeout(). */
#define wait_event_interruptible_timeout(wq, condition, timeout)    \
        wait_event_timeout(wq, condition, timeout)
3031 serge 75
 
76
 
2967 Serge 77
/*
 * wait_event() - block (uninterruptibly) until @condition becomes true.
 *
 * Same mechanism as wait_event_timeout() without a return value: a
 * private event is created, the waiter is linked onto @wq under
 * wq.lock, and the loop re-tests @condition after every WaitEvent()
 * wakeup; on exit the waiter is unlinked and its event destroyed.
 * @condition is evaluated multiple times — it must be side-effect free.
 */
#define wait_event(wq, condition)                           \
do{                                                         \
    wait_queue_t __wait = {                                 \
        .task_list = LIST_HEAD_INIT(__wait.task_list),      \
        .evnt      = CreateEvent(NULL, MANUAL_DESTROY),     \
    };                                                      \
    u32  flags;                                             \
                                                            \
    spin_lock_irqsave(&wq.lock, flags);                     \
    if (list_empty(&__wait.task_list))                      \
        __add_wait_queue(&wq, &__wait);                     \
    spin_unlock_irqrestore(&wq.lock, flags);                \
                                                            \
    for(;;){                                                \
        if (condition)                                      \
            break;                                          \
        WaitEvent(__wait.evnt);                             \
    };                                                      \
    if (!list_empty_careful(&__wait.task_list)) {           \
        spin_lock_irqsave(&wq.lock, flags);                 \
        list_del_init(&__wait.task_list);                   \
        spin_unlock_irqrestore(&wq.lock, flags);            \
    };                                                      \
    DestroyEvent(__wait.evnt);                              \
} while (0)
102
 
103
 
3031 serge 104
 
105
 
2967 Serge 106
static inline
107
void wake_up_all(wait_queue_head_t *q)
108
{
109
    wait_queue_t *curr;
110
    unsigned long flags;
111
 
112
    spin_lock_irqsave(&q->lock, flags);
113
    list_for_each_entry(curr, &q->task_list, task_list)
114
    {
115
        kevent_t event;
116
        event.code = -1;
117
        RaiseEvent(curr->evnt, 0, &event);
118
    }
119
    spin_unlock_irqrestore(&q->lock, flags);
120
}
121
 
122
 
123
static inline void
124
init_waitqueue_head(wait_queue_head_t *q)
125
{
126
    spin_lock_init(&q->lock);
127
    INIT_LIST_HEAD(&q->task_list);
128
};
129
 
130
 
131
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
    WQ_NON_REENTRANT    = 1 << 0, /* guarantee non-reentrance */
    WQ_UNBOUND          = 1 << 1, /* not bound to any cpu */
    WQ_FREEZABLE        = 1 << 2, /* freeze during suspend */
    WQ_MEM_RECLAIM      = 1 << 3, /* may be used for memory reclaim */
    WQ_HIGHPRI          = 1 << 4, /* high priority */
    WQ_CPU_INTENSIVE    = 1 << 5, /* cpu intensive workqueue */

    WQ_DRAINING         = 1 << 6, /* internal: workqueue is draining */
    WQ_RESCUER          = 1 << 7, /* internal: workqueue has rescuer */

    WQ_MAX_ACTIVE       = 512,    /* I like 512, better ideas? */
    WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
    WQ_DFL_ACTIVE       = WQ_MAX_ACTIVE / 2,
};
150
 
151
struct work_struct;

/* Minimal workqueue: a spinlock-protected list of pending work items.
 * Far simpler than the Linux original; only the fields this port uses. */
struct workqueue_struct {
    spinlock_t lock;               /* guards worklist */
    struct list_head worklist;     /* queued struct work_struct entries */
};

/* Work callback, invoked with the work item that scheduled it. */
typedef void (*work_func_t)(struct work_struct *work);

/* One unit of deferred work. */
struct work_struct {
    struct list_head entry;        /* link in workqueue_struct.worklist */
    struct workqueue_struct *data; /* presumably the owning queue — set by code outside this header; confirm */
    work_func_t func;              /* callback to run */
};

/* Delayed work: wraps work_struct; no timer field in this port. */
struct delayed_work {
    struct work_struct work;
};
169
 
170
/* Allocate a workqueue (implemented outside this header).
 * NOTE(review): alloc_ordered_workqueue() below expands to
 * alloc_workqueue(), which is not declared anywhere in this header —
 * only alloc_workqueue_key() is; confirm where alloc_workqueue() (or a
 * macro mapping it to alloc_workqueue_key) is defined. */
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
                           unsigned int flags, int max_active);


/* Ordered queue: unbound, max_active forced to 1 so items run one at a
 * time in queueing order. */
#define alloc_ordered_workqueue(fmt, flags, args...)            \
        alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
176
 
2967 Serge 177
/* Schedule @dwork on @wq after @delay (units unverified here — presumably
 * timer ticks; confirm against the implementation, which lives outside
 * this header). Returns int status. */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);
179
 
180
/* Initialize a struct delayed_work: empty list link plus callback.
 * Evaluates _work more than once — pass a plain lvalue, not an
 * expression with side effects. */
#define INIT_DELAYED_WORK(_work, _func)         \
    do {                                        \
        INIT_LIST_HEAD(&(_work)->work.entry);   \
        (_work)->work.func = _func;             \
    } while (0)
185
 
3031 serge 186
 
187
/* Completion: counter plus a wait queue for threads awaiting it.
 * No complete()/wait_for_completion() helpers are visible in this
 * header — presumably defined elsewhere; field semantics follow the
 * Linux convention (done = un-consumed completions) — confirm. */
struct completion {
    unsigned int done;
    wait_queue_head_t wait;
};
191
 
192
 
2967 Serge 193
#endif
194