Subversion Repositories Kolibri OS

Rev

Rev 4292 | Rev 6336 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
5270 serge 1
/*
2
 * kernel/workqueue.c - generic async execution with shared worker pool
3
 *
4
 * Copyright (C) 2002		Ingo Molnar
5
 *
6
 *   Derived from the taskqueue/keventd code by:
7
 *     David Woodhouse 
8
 *     Andrew Morton
9
 *     Kai Petzke 
10
 *     Theodore Ts'o 
11
 *
12
 * Made to use alloc_percpu by Christoph Lameter.
13
 *
14
 * Copyright (C) 2010		SUSE Linux Products GmbH
15
 * Copyright (C) 2010		Tejun Heo 
16
 *
17
 * This is the generic async execution mechanism.  Work items as are
18
 * executed in process context.  The worker pool is shared and
19
 * automatically managed.  There are two worker pools for each CPU (one for
20
 * normal work items and the other for high priority ones) and some extra
21
 * pools for workqueues which are not bound to any specific CPU - the
22
 * number of these backing pools is dynamic.
23
 *
24
 * Please read Documentation/workqueue.txt for details.
25
 */
26
 
27
#include 
3482 Serge 28
#include 
5270 serge 29
#include 
30
#include 
3482 Serge 31
#include 
5270 serge 32
#include 
33
#include 
34
#include 
35
 
36
 
3482 Serge 37
#include 
38
 
3763 Serge 39
/* Set elsewhere by the driver core; run_workqueue() keeps polling while it is non-zero. */
extern int driver_wq_state;
40
 
3482 Serge 41
struct workqueue_struct *alloc_workqueue(const char *fmt,
42
                           unsigned int flags,
43
                           int max_active)
44
{
45
    struct workqueue_struct *wq;
46
 
47
    wq = kzalloc(sizeof(*wq),0);
48
    if (!wq)
49
        goto err;
50
 
51
    INIT_LIST_HEAD(&wq->worklist);
52
    INIT_LIST_HEAD(&wq->delayed_worklist);
53
 
54
    return wq;
55
err:
56
    return NULL;
57
}
58
 
59
 
60
 
61
void run_workqueue(struct workqueue_struct *cwq)
62
{
63
    unsigned long irqflags;
64
 
65
//    dbgprintf("wq: %x head %x, next %x\n",
66
//               cwq, &cwq->worklist, cwq->worklist.next);
67
 
3763 Serge 68
    while(driver_wq_state != 0)
69
    {
70
        spin_lock_irqsave(&cwq->lock, irqflags);
3482 Serge 71
 
3763 Serge 72
        while (!list_empty(&cwq->worklist))
73
        {
74
            struct work_struct *work = list_entry(cwq->worklist.next,
3482 Serge 75
                                        struct work_struct, entry);
3763 Serge 76
            work_func_t f = work->func;
77
            list_del_init(cwq->worklist.next);
78
//            printf("work %p, func %p\n",
79
//                      work, f);
3482 Serge 80
 
3763 Serge 81
            spin_unlock_irqrestore(&cwq->lock, irqflags);
82
            f(work);
83
            spin_lock_irqsave(&cwq->lock, irqflags);
84
        }
85
 
3482 Serge 86
        spin_unlock_irqrestore(&cwq->lock, irqflags);
87
 
3763 Serge 88
        delay(1);
89
    };
3482 Serge 90
}
91
 
92
 
93
bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
94
{
95
    unsigned long flags;
96
 
97
    if(!list_empty(&work->entry))
98
        return 0;
99
 
100
//    dbgprintf("%s %p queue: %p\n", __FUNCTION__, work, wq);
101
 
102
    spin_lock_irqsave(&wq->lock, flags);
103
 
104
    list_add_tail(&work->entry, &wq->worklist);
105
 
106
    spin_unlock_irqrestore(&wq->lock, flags);
107
 
108
    return 1;
109
};
110
 
111
 
112
/*
 * delayed_work_timer_fn - timer callback for delayed work
 * @__data: the struct delayed_work, passed through the timer as an
 *          unsigned long
 *
 * Moves an expired delayed item onto its workqueue.  The target queue
 * was stashed in work.data by queue_delayed_work().
 */
void __stdcall delayed_work_timer_fn(unsigned long __data)
{
    struct delayed_work *dwork = (struct delayed_work *)__data;
    struct workqueue_struct *target = dwork->work.data;

    queue_work(target, &dwork->work);
}
119
 
120
int queue_delayed_work(struct workqueue_struct *wq,
121
                        struct delayed_work *dwork, unsigned long delay)
122
{
123
    struct work_struct *work = &dwork->work;
124
 
125
    if (delay == 0)
126
        return queue_work(wq, &dwork->work);
127
 
128
//    dbgprintf("%s %p queue: %p\n", __FUNCTION__, &dwork->work, wq);
129
 
130
    work->data = wq;
4125 Serge 131
    TimerHS(delay,0, delayed_work_timer_fn, dwork);
3482 Serge 132
    return 1;
133
}
134
 
135
 
136
bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
137
{
138
    return queue_delayed_work(system_wq, dwork, delay);
139
}
140
 
4292 Serge 141
/*
 * mod_delayed_work - (re)schedule @dwork on @wq after @delay ticks
 *
 * NOTE(review): unlike the Linux function of the same name, this does
 * not cancel a previously armed timer — it simply queues again; confirm
 * callers do not rely on the "modify" semantics.
 */
bool mod_delayed_work(struct workqueue_struct *wq,
                                    struct delayed_work *dwork,
                                    unsigned long delay)
{
    return queue_delayed_work(wq, dwork, delay);
}
147
 
148
int del_timer(struct timer_list *timer)
149
{
150
    int ret = 0;
151
 
152
    if(timer->handle)
153
    {
154
        CancelTimerHS(timer->handle);
155
        timer->handle = 0;
156
        ret = 1;
157
    };
158
    return ret;
159
};
160
 
161
bool cancel_work_sync(struct work_struct *work)
162
{
163
    unsigned long flags;
164
    int ret = 0;
165
 
166
    spin_lock_irqsave(&system_wq->lock, flags);
167
    if(!list_empty(&work->entry))
168
    {
169
        list_del(&work->entry);
170
        ret = 1;
171
    };
172
    spin_unlock_irqrestore(&system_wq->lock, flags);
173
    return ret;
174
}
175
 
176
bool cancel_delayed_work(struct delayed_work *dwork)
177
{
178
    return cancel_work_sync(&dwork->work);
179
}
180
 
181
bool cancel_delayed_work_sync(struct delayed_work *dwork)
182
{
183
    return cancel_work_sync(&dwork->work);
184
}
185
 
4125 Serge 186
int mod_timer(struct timer_list *timer, unsigned long expires)
187
{
188
    int ret = 0;
189
    expires - GetTimerTicks();
190
 
191
    if(timer->handle)
192
    {
193
        CancelTimerHS(timer->handle);
194
        timer->handle = 0;
195
        ret = 1;
196
    };
197
 
198
    timer->handle = TimerHS(expires, 0, timer->function, timer->data);
199
 
200
    return ret;
201
}
202