Subversion Repositories Kolibri OS


Rev 4292 → Rev 5270
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse
 *     Andrew Morton
 *     Kai Petzke
 *     Theodore Ts'o
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/workqueue.txt for details.
 */
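
/*
 * Typical driver-side use of this shim (a minimal sketch, not part of this
 * file: my_handler is a hypothetical callback, and it assumes the usual
 * Linux-style INIT_WORK() initializer and the shared system_wq queue are
 * provided by the accompanying headers/objects):
 *
 *     static void my_handler(struct work_struct *work)
 *     {
 *         // deferred work runs here, on the driver's worker thread
 *     }
 *
 *     static struct work_struct my_work;
 *
 *     INIT_WORK(&my_work, my_handler);
 *     queue_work(system_wq, &my_work);
 *
 * queue_work() below only links the item into wq->worklist;
 * run_workqueue() is the loop that actually drains it while
 * driver_wq_state is non-zero.
 */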

#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 


#include 

extern int driver_wq_state;

struct workqueue_struct *alloc_workqueue(const char *fmt,
                           unsigned int flags,
                           int max_active)
{
    struct workqueue_struct *wq;

    wq = kzalloc(sizeof(*wq), 0);
    if (!wq)
        goto err;

    INIT_LIST_HEAD(&wq->worklist);
    INIT_LIST_HEAD(&wq->delayed_worklist);

    return wq;
err:
    return NULL;
}

/* Worker loop: drain the worklist, dropping the lock around each callback,
 * then sleep briefly (delay(1)) and poll again while driver_wq_state is
 * non-zero. */
void run_workqueue(struct workqueue_struct *cwq)
{
    unsigned long irqflags;

//    dbgprintf("wq: %x head %x, next %x\n",
//               cwq, &cwq->worklist, cwq->worklist.next);

    while(driver_wq_state != 0)
    {
        spin_lock_irqsave(&cwq->lock, irqflags);

        while (!list_empty(&cwq->worklist))
        {
            struct work_struct *work = list_entry(cwq->worklist.next,
                                        struct work_struct, entry);
            work_func_t f = work->func;
            list_del_init(cwq->worklist.next);
//            printf("work %p, func %p\n",
//                      work, f);

            spin_unlock_irqrestore(&cwq->lock, irqflags);
            f(work);
            spin_lock_irqsave(&cwq->lock, irqflags);
        }

        spin_unlock_irqrestore(&cwq->lock, irqflags);

        delay(1);
    };
}

bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
    unsigned long flags;

    if(!list_empty(&work->entry))
        return 0;

//    dbgprintf("%s %p queue: %p\n", __FUNCTION__, work, wq);

    spin_lock_irqsave(&wq->lock, flags);

    list_add_tail(&work->entry, &wq->worklist);

    spin_unlock_irqrestore(&wq->lock, flags);

    return 1;
};

void __stdcall delayed_work_timer_fn(unsigned long __data)
{
    struct delayed_work *dwork = (struct delayed_work *)__data;
    struct workqueue_struct *wq = dwork->work.data;

    queue_work(wq, &dwork->work);
}

int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
    struct work_struct *work = &dwork->work;

    if (delay == 0)
        return queue_work(wq, &dwork->work);

//    dbgprintf("%s %p queue: %p\n", __FUNCTION__, &dwork->work, wq);

    work->data = wq;
    TimerHS(delay, 0, delayed_work_timer_fn, dwork);
    return 1;
}

bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
    return queue_delayed_work(system_wq, dwork, delay);
}
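
/*
 * Delayed-work sketch (illustrative, not part of this file; it assumes the
 * Linux-style INIT_DELAYED_WORK() initializer is available from the
 * accompanying header). Note that queue_delayed_work() above hands 'delay'
 * straight to TimerHS() without any jiffies conversion:
 *
 *     static struct delayed_work my_dwork;
 *
 *     INIT_DELAYED_WORK(&my_dwork, my_handler);
 *     schedule_delayed_work(&my_dwork, 100);
 *
 *     // later: re-arm on the same queue, or drop a pending item
 *     mod_delayed_work(system_wq, &my_dwork, 50);
 *     cancel_delayed_work(&my_dwork);
 */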

bool mod_delayed_work(struct workqueue_struct *wq,
                                    struct delayed_work *dwork,
                                    unsigned long delay)
{
    return queue_delayed_work(wq, dwork, delay);
}

int del_timer(struct timer_list *timer)
{
    int ret = 0;

    if(timer->handle)
    {
        CancelTimerHS(timer->handle);
        timer->handle = 0;
        ret = 1;
    };
    return ret;
};

/* Note: despite the Linux name, this only unlinks a pending item from the
 * system_wq worklist; it does not wait for a callback that is already
 * running. */
bool cancel_work_sync(struct work_struct *work)
{
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&system_wq->lock, flags);
    if(!list_empty(&work->entry))
    {
        list_del(&work->entry);
        ret = 1;
    };
    spin_unlock_irqrestore(&system_wq->lock, flags);
    return ret;
}

bool cancel_delayed_work(struct delayed_work *dwork)
{
    return cancel_work_sync(&dwork->work);
}

bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
    return cancel_work_sync(&dwork->work);
}

int mod_timer(struct timer_list *timer, unsigned long expires)
{
    int ret = 0;
    expires -= GetTimerTicks();  /* convert the absolute expiry tick count into a relative delay for TimerHS() */

    if(timer->handle)
    {
        CancelTimerHS(timer->handle);
        timer->handle = 0;
        ret = 1;
    };

    timer->handle = TimerHS(expires, 0, timer->function, timer->data);

    return ret;
}
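
/*
 * Timer sketch (hypothetical names: my_timer_fn, my_ctx). mod_timer()
 * above subtracts GetTimerTicks() from 'expires', so callers pass an
 * absolute tick count; my_timer_fn is assumed to follow the same
 * __stdcall(unsigned long) convention as delayed_work_timer_fn():
 *
 *     static struct timer_list my_timer;
 *
 *     my_timer.function = my_timer_fn;
 *     my_timer.data     = (unsigned long)&my_ctx;
 *
 *     mod_timer(&my_timer, GetTimerTicks() + 100);
 *     ...
 *     del_timer(&my_timer);
 */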