/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
/*
 * NOTE: the include targets were lost when this listing was exported (the
 * angle-bracketed names were swallowed as markup); the set below is a
 * plausible reconstruction for this port, not the verified original.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <syscall.h>

/*
 * Owner tracking is compiled out in this port, so this hook is a no-op
 * kept for API compatibility with the Linux original.
 */
static inline void mutex_set_owner(struct mutex *lock)
{
}

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)     (atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
    atomic_set(&lock->count, 1);
//  spin_lock_init(&lock->wait_lock);
    INIT_LIST_HEAD(&lock->wait_list);
//  mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    lock->osq = NULL;
#endif
}

static inline int __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
    struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
    struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

    if (!hold_ctx)
        return 0;

    if (unlikely(ctx == hold_ctx))
        return -EALREADY;

    if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
        (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
        return -EDEADLK;
    }

    return 0;
}

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                                                   struct ww_acquire_ctx *ww_ctx)
{
    ww_ctx->acquired++;
}

void ww_mutex_unlock(struct ww_mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
    if (lock->ctx) {
        if (lock->ctx->acquired > 0)
            lock->ctx->acquired--;
        lock->ctx = NULL;
    }
    MutexUnlock(&lock->base);
}

static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
    if (unlikely(atomic_dec_return(count) < 0))
        return -1;
    else
        return 0;
}

static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
    u32 flags;
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);

    lock->ctx = ctx;

    /*
     * The lock->ctx update should be visible on all cores before
     * the atomic read is done, otherwise contended waiters might be
     * missed. The contended waiters will either see ww_ctx == NULL
     * and keep spinning, or they will acquire wait_lock, add themselves
     * to the waiter list and sleep.
     */
    smp_mb(); /* ^^^ */

    /*
     * Check if the lock is contended; if not, there is nobody to wake up.
     */
    if (likely(atomic_read(&lock->base.count) == 0))
        return;

    /*
     * Uh oh, we raced in the fastpath; wake up everyone in this case,
     * so they can see the new lock->ctx.
     */
    flags = safe_cli();
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_appdata *)cur->task)->state = KOS_SLOT_STATE_RUNNING;
    }
    safe_sti(flags);
}

static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);
    lock->ctx = ctx;

    /*
     * Give any possible sleeping processes the chance to wake up,
     * so they can recheck if they have to back off.
     */
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_appdata *)cur->task)->state = KOS_SLOT_STATE_RUNNING;
    }
}

int __ww_mutex_lock_slowpath(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
    struct mutex *lock;
    struct mutex_waiter waiter;
    struct kos_appdata *appdata;
    u32 eflags;
    int ret = 0;

    lock = &ww->base;
    appdata = GetCurrSlot();
    waiter.task = appdata;

    eflags = safe_cli();

    list_add_tail(&waiter.list, &lock->wait_list);

    for (;;) {
        /* 1 -> -1: we took the lock; anything else means contention */
        if (atomic_xchg(&lock->count, -1) == 1)
            break;

        if (ctx->acquired > 0) {
            ret = __ww_mutex_lock_check_stamp(lock, ctx);
            if (ret)
                goto err;
        }

        appdata->state = KOS_SLOT_STATE_SUSPENDED;
        change_task();
    }

    /*
     * Remove our waiter before testing for remaining waiters; with our
     * own entry still queued the list could never be observed empty here,
     * and the count would stay at -1 with nobody left to wake.
     */
    list_del(&waiter.list);

    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    ww_mutex_set_context_slowpath(ww, ctx);
    safe_sti(eflags);
    return 0;

err:
    list_del(&waiter.list);
    safe_sti(eflags);
    return ret;
}

int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = __mutex_fastpath_lock_retval(&lock->base.count);

    if (likely(!ret)) {
        ww_mutex_set_context_fastpath(lock, ctx);
        mutex_set_owner(&lock->base);
    } else
        ret = __ww_mutex_lock_slowpath(lock, ctx);
    return ret;
}

211 | |||
212 | int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
||
213 | { |
||
214 | int ret; |
||
215 | |||
216 | ret = __mutex_fastpath_lock_retval(&lock->base.count); |
||
217 | |||
218 | if (likely(!ret)) { |
||
219 | ww_mutex_set_context_fastpath(lock, ctx); |
||
220 | mutex_set_owner(&lock->base); |
||
221 | } else |
||
222 | ret = __ww_mutex_lock_slowpath(lock, ctx); |
||
223 | return ret; |
||
224 | }>=> |