/*
 * Provenance: KolibriOS Subversion repository, file revision 2967
 * (author: Serge); repository head at capture time: rev 3031.
 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

/*
 * Minimal Linux-style wait-queue / workqueue compatibility layer for
 * KolibriOS kernel glue code.  Instead of the Linux scheduler, each
 * waiter blocks on a native KolibriOS event object (evhandle_t).
 */

typedef struct __wait_queue      wait_queue_t;
typedef struct __wait_queue_head wait_queue_head_t;
7
struct __wait_queue
8
{
9
    struct list_head task_list;
10
    evhandle_t evnt;
11
};
12
 
13
struct __wait_queue_head
14
{
15
    spinlock_t lock;
16
    struct list_head task_list;
17
};
18
 
19
/*
 * Link @new at the head of @head's waiter list.
 * Caller must hold head->lock (wait_event() takes it around this call).
 */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
    list_add(&new->task_list, &head->task_list);
}
23
 
24
 
25
/*
 * Classic Linux sleep loop: re-test @condition after each wakeup so
 * spurious wakeups are harmless.
 *
 * NOTE(review): DEFINE_WAIT / prepare_to_wait / finish_wait / schedule
 * are not provided by this shim — confirm this macro is actually used,
 * or that those symbols come from another header.
 */
#define __wait_event(wq, condition)                                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
37
 
38
 
39
/*
 * Block the caller until @condition becomes true.
 *
 * A per-call KolibriOS event is created and the waiter is linked onto
 * @wq under wq.lock; the loop then re-tests @condition and sleeps on
 * the event until it holds.  wake_up_all() raises every queued
 * waiter's event.  On exit the waiter unlinks itself (if still queued)
 * and destroys its event.
 *
 * NOTE(review): assumes RaiseEvent() on a MANUAL_DESTROY event latches,
 * so a wakeup arriving between the condition test and WaitEvent() is
 * not lost — confirm against the KolibriOS event API.
 */
#define wait_event(wq, condition)                           \
do {                                                        \
    wait_queue_t __wait = {                                 \
        .task_list = LIST_HEAD_INIT(__wait.task_list),      \
        .evnt      = CreateEvent(NULL, MANUAL_DESTROY),     \
    };                                                      \
    unsigned long flags;                                    \
                                                            \
    spin_lock_irqsave(&wq.lock, flags);                     \
    if (list_empty(&__wait.task_list))                      \
        __add_wait_queue(&wq, &__wait);                     \
    spin_unlock_irqrestore(&wq.lock, flags);                \
                                                            \
    for (;;) {                                              \
        if (condition)                                      \
            break;                                          \
        WaitEvent(__wait.evnt);                             \
    }                                                       \
    if (!list_empty_careful(&__wait.task_list)) {           \
        spin_lock_irqsave(&wq.lock, flags);                 \
        list_del_init(&__wait.task_list);                   \
        spin_unlock_irqrestore(&wq.lock, flags);            \
    }                                                       \
    DestroyEvent(__wait.evnt);                              \
} while (0)
64
 
65
 
66
static inline
67
void wake_up_all(wait_queue_head_t *q)
68
{
69
    wait_queue_t *curr;
70
    unsigned long flags;
71
 
72
    spin_lock_irqsave(&q->lock, flags);
73
    list_for_each_entry(curr, &q->task_list, task_list)
74
    {
75
        kevent_t event;
76
        event.code = -1;
77
        RaiseEvent(curr->evnt, 0, &event);
78
    }
79
    spin_unlock_irqrestore(&q->lock, flags);
80
}
81
 
82
 
83
static inline void
84
init_waitqueue_head(wait_queue_head_t *q)
85
{
86
    spin_lock_init(&q->lock);
87
    INIT_LIST_HEAD(&q->task_list);
88
};
89
 
90
 
91
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
    WQ_NON_REENTRANT    = 1 << 0, /* guarantee non-reentrance */
    WQ_UNBOUND          = 1 << 1, /* not bound to any cpu */
    WQ_FREEZABLE        = 1 << 2, /* freeze during suspend */
    WQ_MEM_RECLAIM      = 1 << 3, /* may be used for memory reclaim */
    WQ_HIGHPRI          = 1 << 4, /* high priority */
    WQ_CPU_INTENSIVE    = 1 << 5, /* cpu intensive workqueue */

    WQ_DRAINING         = 1 << 6, /* internal: workqueue is draining */
    WQ_RESCUER          = 1 << 7, /* internal: workqueue has rescuer */

    WQ_MAX_ACTIVE       = 512,    /* I like 512, better ideas? */
    WQ_MAX_UNBOUND_PER_CPU  = 4,  /* 4 * #cpus for unbound wq */
    WQ_DFL_ACTIVE       = WQ_MAX_ACTIVE / 2,
};
110
 
111
struct work_struct;
112
 
113
struct workqueue_struct {
114
    spinlock_t lock;
115
    struct list_head worklist;
116
};
117
 
118
typedef void (*work_func_t)(struct work_struct *work);
119
 
120
struct work_struct {
121
    struct list_head entry;
122
    struct workqueue_struct *data;
123
    work_func_t func;
124
};
125
 
126
struct delayed_work {
127
    struct work_struct work;
128
};
129
 
130
 
131
/*
 * Create a workqueue named per @fmt with the given WQ_* @flags and
 * @max_active concurrency cap.  Semantics follow the implementation
 * elsewhere — not visible in this header.
 */
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
                           unsigned int flags, int max_active);

/*
 * Queue @dwork on @wq to run after @delay.  Units of @delay and the
 * return value's meaning are defined by the implementation — TODO
 * confirm against the .c file.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);

/* Prepare @_work (a struct delayed_work *) to run @_func when queued. */
#define INIT_DELAYED_WORK(_work, _func)         \
    do {                                        \
        INIT_LIST_HEAD(&(_work)->work.entry);   \
        (_work)->work.func = _func;             \
    } while (0)

#endif /* _LINUX_WAIT_H */
144