Subversion Repositories: Kolibri OS
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

typedef struct __wait_queue wait_queue_t;
typedef struct __wait_queue_head wait_queue_head_t;

struct __wait_queue
{
    struct list_head task_list;
    evhandle_t evnt;
};

struct __wait_queue_head
{
    spinlock_t lock;
    struct list_head task_list;
};
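
/*
 * Design note (added commentary, not in the upstream header): this port
 * emulates Linux wait queues on top of KolibriOS kernel events.  Each
 * waiter owns an evhandle_t and parks itself with WaitEvent(); wakers
 * post an event with RaiseEvent() rather than invoking the scheduler.
 * struct list_head, spinlock_t, evhandle_t, u32 and kevent_t are assumed
 * to be visible from headers included before this one.
 */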

/* Add a waiter to the queue.  Callers are expected to hold head->lock,
 * as wait_event() and wait_event_timeout() below do. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
    list_add(&new->task_list, &head->task_list);
}

#define __wait_event(wq, condition)                                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
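
/*
 * Commentary (assumption): __wait_event above appears to be the stock
 * scheduler-based Linux form.  DEFINE_WAIT(), prepare_to_wait(),
 * finish_wait() and schedule() are not defined in this header, so it
 * only compiles if another header in this port provides them.  The
 * event-based wait_event()/wait_event_timeout() below are the variants
 * this port actually implements.
 */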

#define wait_event_timeout(wq, condition, timeout)              \
({                                                              \
    long __ret = timeout;                                       \
    do {                                                        \
        wait_queue_t __wait = {                                 \
            .task_list = LIST_HEAD_INIT(__wait.task_list),      \
            .evnt      = CreateEvent(NULL, MANUAL_DESTROY),     \
        };                                                      \
        u32  flags;                                             \
                                                                \
        spin_lock_irqsave(&wq.lock, flags);                     \
        if (list_empty(&__wait.task_list))                      \
            __add_wait_queue(&wq, &__wait);                     \
        spin_unlock_irqrestore(&wq.lock, flags);                \
                                                                \
        for (;;) {                                              \
            if (condition)                                      \
                break;                                          \
            WaitEvent(__wait.evnt);                             \
        }                                                       \
        if (!list_empty_careful(&__wait.task_list)) {           \
            spin_lock_irqsave(&wq.lock, flags);                 \
            list_del_init(&__wait.task_list);                   \
            spin_unlock_irqrestore(&wq.lock, flags);            \
        }                                                       \
        DestroyEvent(__wait.evnt);                              \
    } while (0);                                                \
    __ret;                                                      \
})
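
/*
 * Commentary (added): as written, wait_event_timeout() never actually
 * times out.  __ret is initialised to `timeout` and returned unchanged,
 * and the loop blocks in WaitEvent() until `condition` becomes true, so
 * callers always see a non-zero "time remaining".  Code that relies on
 * a zero return value to detect expiry will never observe one here.
 */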

#define wait_event(wq, condition)                           \
do {                                                        \
    wait_queue_t __wait = {                                 \
        .task_list = LIST_HEAD_INIT(__wait.task_list),      \
        .evnt      = CreateEvent(NULL, MANUAL_DESTROY),     \
    };                                                      \
    u32  flags;                                             \
                                                            \
    spin_lock_irqsave(&wq.lock, flags);                     \
    if (list_empty(&__wait.task_list))                      \
        __add_wait_queue(&wq, &__wait);                     \
    spin_unlock_irqrestore(&wq.lock, flags);                \
                                                            \
    for (;;) {                                              \
        if (condition)                                      \
            break;                                          \
        WaitEvent(__wait.evnt);                             \
    }                                                       \
    if (!list_empty_careful(&__wait.task_list)) {           \
        spin_lock_irqsave(&wq.lock, flags);                 \
        list_del_init(&__wait.task_list);                   \
        spin_unlock_irqrestore(&wq.lock, flags);            \
    }                                                       \
    DestroyEvent(__wait.evnt);                              \
} while (0)
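
/*
 * Usage sketch (hypothetical driver code, not part of this header):
 *
 *     static wait_queue_head_t fence_wq;
 *     static volatile int fence_done;
 *
 *     init_waitqueue_head(&fence_wq);
 *     ...
 *     wait_event(fence_wq, fence_done != 0);   // block until signalled
 *
 * and on the signalling side:
 *
 *     fence_done = 1;
 *     wake_up_all(&fence_wq);                  // wake every waiter
 */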

static inline
void wake_up_all(wait_queue_head_t *q)
{
    wait_queue_t *curr;
    unsigned long flags;

    spin_lock_irqsave(&q->lock, flags);
    list_for_each_entry(curr, &q->task_list, task_list)
    {
        /* Post a synthetic event (code -1) to every waiter so its
         * WaitEvent() call returns and it can re-test its condition. */
        kevent_t event;
        event.code = -1;
        RaiseEvent(curr->evnt, 0, &event);
    }
    spin_unlock_irqrestore(&q->lock, flags);
}

static inline void
init_waitqueue_head(wait_queue_head_t *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->task_list);
}

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
    WQ_NON_REENTRANT    = 1 << 0, /* guarantee non-reentrance */
    WQ_UNBOUND          = 1 << 1, /* not bound to any cpu */
    WQ_FREEZABLE        = 1 << 2, /* freeze during suspend */
    WQ_MEM_RECLAIM      = 1 << 3, /* may be used for memory reclaim */
    WQ_HIGHPRI          = 1 << 4, /* high priority */
    WQ_CPU_INTENSIVE    = 1 << 5, /* cpu intensive workqueue */

    WQ_DRAINING         = 1 << 6, /* internal: workqueue is draining */
    WQ_RESCUER          = 1 << 7, /* internal: workqueue has rescuer */

    WQ_MAX_ACTIVE       = 512,    /* I like 512, better ideas? */
    WQ_MAX_UNBOUND_PER_CPU  = 4,  /* 4 * #cpus for unbound wq */
    WQ_DFL_ACTIVE       = WQ_MAX_ACTIVE / 2,
};

struct work_struct;

struct workqueue_struct {
    spinlock_t lock;
    struct list_head worklist;
};

typedef void (*work_func_t)(struct work_struct *work);

struct work_struct {
    struct list_head entry;
    struct workqueue_struct *data;
    work_func_t func;
};

struct delayed_work {
    struct work_struct work;
};
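
/*
 * Commentary (added): this is a heavily simplified model of the Linux
 * workqueue API.  A workqueue is just a locked list of pending work
 * items, and struct delayed_work carries no timer of its own, so the
 * delay is presumably implemented inside queue_delayed_work() in a
 * companion .c file (an assumption; it is not visible from this header).
 */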

struct workqueue_struct *alloc_workqueue_key(const char *fmt,
                           unsigned int flags, int max_active);

#define alloc_ordered_workqueue(fmt, flags, args...)            \
        alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
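
/*
 * Commentary (assumption): alloc_workqueue() itself is not defined in
 * this header, so alloc_ordered_workqueue() only works if another
 * header maps it onto alloc_workqueue_key() above (or an equivalent).
 */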

int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);

#define INIT_DELAYED_WORK(_work, _func)         \
    do {                                        \
        INIT_LIST_HEAD(&(_work)->work.entry);   \
        (_work)->work.func = _func;             \
    } while (0)
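
/*
 * Usage sketch (hypothetical caller; all names are illustrative only):
 *
 *     static struct workqueue_struct *wq;      // created elsewhere
 *     static struct delayed_work hotplug_work;
 *
 *     static void hotplug_func(struct work_struct *work)
 *     {
 *         // deferred processing goes here
 *     }
 *
 *     INIT_DELAYED_WORK(&hotplug_work, hotplug_func);
 *     queue_delayed_work(wq, &hotplug_work, delay);
 *
 * The unit of `delay` is whatever this port's timer uses (an
 * assumption; the header does not say).
 */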

struct completion {
    unsigned int done;
    wait_queue_head_t wait;
};
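
/*
 * Commentary (added): a completion is the usual Linux pairing of a done
 * counter with a wait queue.  No complete()/wait_for_completion()
 * helpers are declared in this header, so they presumably live in a
 * companion file (an assumption; only the type is defined here).
 */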

#endif /* _LINUX_WAIT_H */