/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include 
#include 
#include 
#include 
#include 
#include 
#include 

struct  kos_taskdata
{
    u32 event_mask;
    u32 pid;
    u16 r0;
    u8  state;
    u8  r1;
    u16 r2;
    u8  wnd_number;
    u8  r3;
    u32 mem_start;
    u32 counter_sum;
    u32 counter_add;
    u32 cpu_usage;
}__attribute__((packed));
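
/*
 * Layout note: kos_taskdata mirrors the first fields of a KolibriOS task
 * slot (TASKDATA); only ->state is really used by this file.  A waiter
 * puts itself to sleep by writing 1 into its own ->state and calling
 * change_task(), and becomes runnable again once someone writes 0 back.
 * The fixed address 0x80003010 used in the slowpath below is assumed to
 * be the slot of the currently running task.
 */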

static inline void mutex_set_owner(struct mutex *lock)
{
}

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0)
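
/*
 * For reference, the count encoding this file relies on (inherited from
 * the Linux fastpath convention): 1 - unlocked, 0 - locked with no
 * waiters, negative - locked with at least one task queued on wait_list.
 */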

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
    atomic_set(&lock->count, 1);
//    spin_lock_init(&lock->wait_lock);
    INIT_LIST_HEAD(&lock->wait_list);
//    mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    lock->osq = NULL;
#endif

}

static inline int __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

        if (!hold_ctx)
                return 0;

        if (unlikely(ctx == hold_ctx))
                return -EALREADY;

        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
                return -EDEADLK;
        }

        return 0;
}
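
/*
 * The stamp check above is what gives ww ("wound/wait") mutexes their
 * ordering: -EALREADY means this context already holds the lock, while
 * -EDEADLK tells a younger context (larger stamp) to back off because an
 * older one currently owns the mutex.  "ctx->stamp - hold_ctx->stamp <=
 * LONG_MAX" is the overflow-safe way of testing that ctx is not older
 * than hold_ctx; the pointer comparison only breaks ties between equal
 * stamps.
 */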


static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                           struct ww_acquire_ctx *ww_ctx)
{
    ww_ctx->acquired++;
}

void ww_mutex_unlock(struct ww_mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
    if (lock->ctx) {
            if (lock->ctx->acquired > 0)
                    lock->ctx->acquired--;
            lock->ctx = NULL;
    }
    MutexUnlock(&lock->base);
}
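
/*
 * Note that lock->ctx is dropped before the mutex itself is released:
 * once MutexUnlock() runs, a new owner may install its own acquire
 * context, so the old one must no longer be reachable through ww->ctx.
 */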

static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
    if (unlikely(atomic_dec_return(count) < 0))
        return -1;
    else
        return 0;
}
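
/*
 * Fastpath: a 1 -> 0 transition of count means the mutex was free and now
 * belongs to the caller; any result below zero means the lock is held (or
 * contended) and the caller has to fall back to the slowpath.
 */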

static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                               struct ww_acquire_ctx *ctx)
{
    u32 flags;
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);

    lock->ctx = ctx;

    /*
     * The lock->ctx update should be visible on all cores before
     * the atomic read is done, otherwise contended waiters might be
     * missed. The contended waiters will either see ww_ctx == NULL
     * and keep spinning, or it will acquire wait_lock, add itself
     * to waiter list and sleep.
     */
    smp_mb(); /* ^^^ */

    /*
     * Check if lock is contended, if not there is nobody to wake up
     */
    if (likely(atomic_read(&lock->base.count) == 0))
            return;

    /*
     * Uh oh, we raced in fastpath, wake up everyone in this case,
     * so they can see the new lock->ctx.
     */
    flags = safe_cli();
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_taskdata*)cur->task)->state = 0;
    }
    safe_sti(flags);
}

static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);
    lock->ctx = ctx;

    /*
     * Give any possible sleeping processes the chance to wake up,
     * so they can recheck if they have to back off.
     */
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_taskdata*)cur->task)->state = 0;
    }
}

int __ww_mutex_lock_slowpath(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
    struct mutex *lock;
    struct mutex_waiter waiter;
    struct kos_taskdata* taskdata;
    u32 eflags;
    int ret = 0;

    lock = &ww->base;
    taskdata = (struct kos_taskdata*)(0x80003010);
    waiter.task = (u32*)taskdata;

    eflags = safe_cli();

    list_add_tail(&waiter.list, &lock->wait_list);

    for(;;)
    {
        if( atomic_xchg(&lock->count, -1) == 1)
            break;

        if (ctx->acquired > 0) {
            ret = __ww_mutex_lock_check_stamp(lock, ctx);
            if (ret)
                goto err;
        };
        taskdata->state = 1;
        change_task();
    };

    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    ww_mutex_set_context_slowpath(ww, ctx);

err:
    list_del(&waiter.list);
    safe_sti(eflags);

    return ret;
}
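
/*
 * Slowpath summary: the waiter advertises contention by writing -1 into
 * count, queues itself on wait_list, marks its task slot as sleeping
 * (state = 1) and yields via change_task(); it runs again once something
 * resets its state to 0 -- the kernel's unlock path or one of the
 * ww_mutex_set_context_*() wake-ups above.  There is no per-mutex
 * wait_lock in this port: the wait_list is protected by disabling
 * interrupts around the whole operation (safe_cli/safe_sti).
 */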


int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = __mutex_fastpath_lock_retval(&lock->base.count);

    if (likely(!ret)) {
            ww_mutex_set_context_fastpath(lock, ctx);
            mutex_set_owner(&lock->base);
    } else
            ret = __ww_mutex_lock_slowpath(lock, ctx);
    return ret;
}


int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = __mutex_fastpath_lock_retval(&lock->base.count);

    if (likely(!ret)) {
            ww_mutex_set_context_fastpath(lock, ctx);
            mutex_set_owner(&lock->base);
    } else
            ret = __ww_mutex_lock_slowpath(lock, ctx);
    return ret;
}
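
/*
 * Caller-side sketch (illustrative only, not part of this file): the usual
 * acquire/backoff pattern for these entry points.  obj_a/obj_b and the way
 * the ww_acquire_ctx gets its stamp are assumptions of the example; the
 * error handling follows the Linux ww_mutex convention this code is ported
 * from.
 *
 *     struct ww_acquire_ctx ctx;   // stamp assigned by the caller's init helper
 *     int ret;
 *
 * retry:
 *     ret = __ww_mutex_lock(&obj_a->lock, &ctx);
 *     if (!ret)
 *         ret = __ww_mutex_lock(&obj_b->lock, &ctx);
 *
 *     if (ret == -EDEADLK) {
 *         // we are the younger context: release what we hold and try again
 *         ww_mutex_unlock(&obj_a->lock);
 *         goto retry;
 *     }
 *
 *     // ... work with obj_a and obj_b ...
 *
 *     ww_mutex_unlock(&obj_b->lock);
 *     ww_mutex_unlock(&obj_a->lock);
 */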