Subversion Repositories Kolibri OS

#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include 
#include 

typedef struct __wait_queue wait_queue_t;
typedef struct __wait_queue_head wait_queue_head_t;

/* One waiter: list linkage plus the KolibriOS event handle used to block
 * and wake the waiting thread. */
struct __wait_queue
{
    struct list_head task_list;
    evhandle_t evnt;
};

/* Head of a wait queue: the list of waiters, protected by a spinlock. */
struct __wait_queue_head
{
    spinlock_t lock;
    struct list_head task_list;
};

/* Link a waiter onto the queue; callers take head->lock around this. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
    list_add(&new->task_list, &head->task_list);
}

/* Classic Linux-style wait loop; depends on DEFINE_WAIT(), prepare_to_wait(),
 * finish_wait() and schedule() being defined elsewhere. */
#define __wait_event(wq, condition)                                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)


/* Wait until the condition becomes true.  Note that the timeout is not
 * actually enforced here: WaitEvent() blocks until the queue is woken, and
 * the macro then returns the timeout value unchanged. */
#define wait_event_timeout(wq, condition, timeout)          \
({                                                          \
    long __ret = timeout;                                   \
do{                                                         \
    wait_queue_t __wait = {                                 \
        .task_list = LIST_HEAD_INIT(__wait.task_list),      \
        .evnt      = CreateEvent(NULL, MANUAL_DESTROY),     \
    };                                                      \
    unsigned long flags;                                    \
                                                            \
    spin_lock_irqsave(&wq.lock, flags);                     \
    if (list_empty(&__wait.task_list))                      \
        __add_wait_queue(&wq, &__wait);                     \
    spin_unlock_irqrestore(&wq.lock, flags);                \
                                                            \
    for(;;){                                                \
        if (condition)                                      \
            break;                                          \
        WaitEvent(__wait.evnt);                             \
    };                                                      \
    if (!list_empty(&__wait.task_list)) {                   \
        spin_lock_irqsave(&wq.lock, flags);                 \
        list_del_init(&__wait.task_list);                   \
        spin_unlock_irqrestore(&wq.lock, flags);            \
    };                                                      \
    DestroyEvent(__wait.evnt);                              \
} while (0);                                                \
    __ret;                                                  \
})

/* There is no signal delivery to interrupt a sleeping thread in this port,
 * so the interruptible variant maps directly to wait_event_timeout(). */
#define wait_event_interruptible_timeout(wq, condition, timeout)    \
        wait_event_timeout(wq, condition, timeout)
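
/*
 * Illustrative use (not part of this header): a driver waits for a flag that
 * another context sets.  The names fence_queue and fence_done are
 * hypothetical, and the queue must first be set up with
 * init_waitqueue_head(&fence_queue).  As noted above, the timeout value is
 * returned unchanged once the condition holds.
 *
 *     static wait_queue_head_t fence_queue;
 *     static volatile int      fence_done;
 *
 *     long ret = wait_event_timeout(fence_queue, fence_done != 0, 100);
 */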

/* Block until the condition becomes true.  Waiters are woken by
 * wake_up_all(); there is no timeout and the wait cannot be interrupted. */
#define wait_event(wq, condition)                           \
do{                                                         \
    wait_queue_t __wait = {                                 \
        .task_list = LIST_HEAD_INIT(__wait.task_list),      \
        .evnt      = CreateEvent(NULL, MANUAL_DESTROY),     \
    };                                                      \
    unsigned long flags;                                    \
                                                            \
    spin_lock_irqsave(&wq.lock, flags);                     \
    if (list_empty(&__wait.task_list))                      \
        __add_wait_queue(&wq, &__wait);                     \
    spin_unlock_irqrestore(&wq.lock, flags);                \
                                                            \
    for(;;){                                                \
        if (condition)                                      \
            break;                                          \
        WaitEvent(__wait.evnt);                             \
    };                                                      \
    if (!list_empty_careful(&__wait.task_list)) {           \
        spin_lock_irqsave(&wq.lock, flags);                 \
        list_del_init(&__wait.task_list);                   \
        spin_unlock_irqrestore(&wq.lock, flags);            \
    };                                                      \
    DestroyEvent(__wait.evnt);                              \
} while (0)


/* Wake every waiter currently linked on the queue by raising its event. */
static inline
void wake_up_all(wait_queue_head_t *q)
{
    wait_queue_t *curr;
    unsigned long flags;

    spin_lock_irqsave(&q->lock, flags);
    list_for_each_entry(curr, &q->task_list, task_list)
    {
//        printf("raise event \n");

        kevent_t event;
        event.code = -1;
        RaiseEvent(curr->evnt, 0, &event);
    }
    spin_unlock_irqrestore(&q->lock, flags);
}

static inline void
init_waitqueue_head(wait_queue_head_t *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->task_list);
}
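
/*
 * Illustrative pairing (not part of this header): one thread sleeps on a
 * condition while another context sets it and wakes the queue.  The names
 * my_device, dev and irq_done are hypothetical.
 *
 *     struct my_device {
 *         wait_queue_head_t queue;
 *         volatile int      irq_done;
 *     };
 *
 *     // during initialisation
 *     init_waitqueue_head(&dev->queue);
 *     dev->irq_done = 0;
 *
 *     // waiting side
 *     wait_event(dev->queue, dev->irq_done != 0);
 *
 *     // waking side, e.g. from an interrupt handler
 *     dev->irq_done = 1;
 *     wake_up_all(&dev->queue);
 */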

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
    WQ_NON_REENTRANT    = 1 << 0, /* guarantee non-reentrance */
    WQ_UNBOUND          = 1 << 1, /* not bound to any cpu */
    WQ_FREEZABLE        = 1 << 2, /* freeze during suspend */
    WQ_MEM_RECLAIM      = 1 << 3, /* may be used for memory reclaim */
    WQ_HIGHPRI          = 1 << 4, /* high priority */
    WQ_CPU_INTENSIVE    = 1 << 5, /* cpu intensive workqueue */

    WQ_DRAINING         = 1 << 6, /* internal: workqueue is draining */
    WQ_RESCUER          = 1 << 7, /* internal: workqueue has rescuer */

    WQ_MAX_ACTIVE       = 512,    /* I like 512, better ideas? */
    WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
    WQ_DFL_ACTIVE       = WQ_MAX_ACTIVE / 2,
};

struct work_struct;

/* Minimal workqueue: a spinlock-protected list of pending work items. */
struct workqueue_struct {
    spinlock_t lock;
    struct list_head worklist;
};

typedef void (*work_func_t)(struct work_struct *work);

struct work_struct {
    struct list_head entry;
    struct workqueue_struct *data;
    work_func_t func;
};

struct delayed_work {
    struct work_struct work;
};

struct workqueue_struct *alloc_workqueue_key(const char *fmt,
                           unsigned int flags, int max_active);

/* alloc_workqueue() itself is expected to be provided elsewhere as a wrapper
 * around alloc_workqueue_key(). */
#define alloc_ordered_workqueue(fmt, flags, args...)            \
        alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);

#define INIT_DELAYED_WORK(_work, _func)         \
    do {                                        \
        INIT_LIST_HEAD(&(_work)->work.entry);   \
        (_work)->work.func = _func;             \
    } while (0)
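
/*
 * Illustrative use of the delayed-work interface (not part of this header).
 * The names wq, my_poll_work and my_poll_func are hypothetical; wq is assumed
 * to have been created through alloc_workqueue_key().  The unit of the delay
 * argument is whatever queue_delayed_work() uses internally.
 *
 *     static struct delayed_work my_poll_work;
 *
 *     static void my_poll_func(struct work_struct *work)
 *     {
 *         // runs later, in workqueue context
 *     }
 *
 *     INIT_DELAYED_WORK(&my_poll_work, my_poll_func);
 *     queue_delayed_work(wq, &my_poll_work, 10);
 */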

struct completion {
    unsigned int done;
    wait_queue_head_t wait;
};
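
/*
 * struct completion carries a counter plus a wait queue, but no complete() or
 * wait_for_completion() helpers are declared in this header.  A minimal
 * sketch of how such helpers could be built on wait_event()/wake_up_all(),
 * purely as an assumption about intended use:
 *
 *     static inline void init_completion(struct completion *x)
 *     {
 *         x->done = 0;
 *         init_waitqueue_head(&x->wait);
 *     }
 *
 *     static inline void complete_all(struct completion *x)
 *     {
 *         x->done = 1;
 *         wake_up_all(&x->wait);
 *     }
 *
 *     static inline void wait_for_completion(struct completion *x)
 *     {
 *         wait_event(x->wait, x->done != 0);
 *     }
 */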

#endif