Rev 5270 | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
/*
 * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
 *
 * Original mutex implementation started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar
 *
 * Wound/wait implementation:
 *  Copyright (C) 2013 Canonical Ltd.
 *
 * This file contains the main data structure and API definitions.
 */
13 | |||
#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H

/*
 * NOTE(review): the include targets were stripped by the web viewer (angle
 * brackets eaten as markup). <linux/mutex.h> matches the upstream file this
 * port derives from; confirm the second header against the original source.
 */
#include <linux/mutex.h>
#include <linux/atomic.h>
19 | |||
/*
 * A ww_class ties together all w/w mutexes that may be acquired under one
 * acquire context. Its monotonically increasing stamp counter is the source
 * of per-context stamps (see ww_acquire_init()), which decide which context
 * is "older" and therefore wins on contention.
 */
struct ww_class {
	atomic_long_t stamp;			/* source of acquire-context stamps */
	struct lock_class_key acquire_key;	/* lockdep key for acquire contexts */
	struct lock_class_key mutex_key;	/* lockdep key for the mutexes */
	const char *acquire_name;		/* lockdep name for acquire contexts */
	const char *mutex_name;			/* lockdep name for the mutexes */
};
||
27 | |||
/*
 * Per-task context tracking one "acquire phase" of multiple w/w mutexes
 * belonging to a single ww_class. Initialized by ww_acquire_init(), torn
 * down by ww_acquire_fini().
 */
struct ww_acquire_ctx {
	struct task_struct *task;	/* task owning this acquire phase */
	unsigned long stamp;		/* age stamp from ww_class::stamp */
	unsigned acquired;		/* number of w/w mutexes currently held */
#ifdef CONFIG_DEBUG_MUTEXES
	unsigned done_acquire;		/* set once ww_acquire_done() was called */
	struct ww_class *ww_class;	/* class this context was created for */
	struct ww_mutex *contending_lock; /* lock that wounded us (-EDEADLK) */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* tracks the whole acquire phase as one lock */
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	/* fault injection: periodically force -EDEADLK to exercise slowpaths */
	unsigned deadlock_inject_interval;
	unsigned deadlock_inject_countdown;
#endif
};
||
45 | |||
/*
 * A wait/wound mutex: an ordinary mutex plus a pointer to the acquire
 * context currently holding it (NULL when held without a context or not
 * held at all — see ww_mutex_init()).
 */
struct ww_mutex {
	struct mutex base;		/* underlying plain mutex */
	struct ww_acquire_ctx *ctx;	/* current acquire context, if any */
#ifdef CONFIG_DEBUG_MUTEXES
	struct ww_class *ww_class;	/* class, checked against ctx's class */
#endif
};
||
53 | |||
/*
 * Static initializers for ww_class and ww_mutex.
 *
 * Fixes relative to the previous revision:
 *  - The .ww_class member of struct ww_mutex exists only under
 *    CONFIG_DEBUG_MUTEXES, so its initializer must be guarded by that
 *    option too (it was guarded by CONFIG_DEBUG_LOCK_ALLOC, breaking the
 *    build with DEBUG_LOCK_ALLOC=y and DEBUG_MUTEXES=n).
 *  - __WW_MUTEX_INITIALIZER had a stray '\' fused onto __MUTEX_INITIALIZER
 *    and passed the ww_mutex name where the embedded base mutex is meant
 *    (upstream later fixed this the same way: initialize .base with
 *    __MUTEX_INITIALIZER(lockname.base)).
 */
#ifdef CONFIG_DEBUG_MUTEXES
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
		, .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif

#define __WW_CLASS_INITIALIZER(ww_class) \
		{ .stamp = ATOMIC_LONG_INIT(0) \
		, .acquire_name = #ww_class "_acquire" \
		, .mutex_name = #ww_class "_mutex" }

#define __WW_MUTEX_INITIALIZER(lockname, class) \
		{ .base = __MUTEX_INITIALIZER(lockname.base) \
		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }

#define DEFINE_WW_CLASS(classname) \
	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)

#define DEFINE_WW_MUTEX(mutexname, ww_class) \
	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
75 | |||
/**
 * ww_mutex_init - initialize the w/w mutex
 * @lock: the mutex to be initialized
 * @ww_class: the w/w class the mutex should belong to
 *
 * Initialize the w/w mutex to unlocked state and associate it with the given
 * class.
 *
 * It is not allowed to initialize an already locked mutex.
 */
static inline void ww_mutex_init(struct ww_mutex *lock,
				 struct ww_class *ww_class)
{
	/*
	 * NOTE(review): this port calls MutexInit() instead of upstream's
	 * mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key),
	 * so the per-class lockdep name/key go unused here — confirm intended.
	 */
	MutexInit(&lock->base);
	lock->ctx = NULL;	/* no acquire context holds this lock yet */
#ifdef CONFIG_DEBUG_MUTEXES
	lock->ww_class = ww_class;
#endif
}
||
95 | |||
/**
 * ww_acquire_init - initialize a w/w acquire context
 * @ctx: w/w acquire context to initialize
 * @ww_class: w/w class of the context
 *
 * Initializes a context to acquire multiple mutexes of the given w/w class.
 *
 * Context-based w/w mutex acquiring can be done in any order whatsoever within
 * a given lock class. Deadlocks will be detected and handled with the
 * wait/wound logic.
 *
 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
 * result in undetected deadlocks and is hence forbidden. Mixing different
 * contexts for the same w/w class when acquiring mutexes can also result in
 * undetected deadlocks, and is hence also forbidden. Both types of abuse will
 * be caught by enabling CONFIG_PROVE_LOCKING.
 *
 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
 * to the usual locking rules between different lock classes.
 *
 * An acquire context must be released with ww_acquire_fini by the same task
 * before the memory is freed. It is recommended to allocate the context itself
 * on the stack.
 */
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
				   struct ww_class *ww_class)
{
	ctx->task = current;
	/* Monotonic per-class stamp: it decides who backs off on conflict. */
	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
	ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
	ctx->ww_class = ww_class;
	ctx->done_acquire = 0;
	ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
			 &ww_class->acquire_key, 0);
	/* The whole acquire phase is tracked by lockdep as one "lock". */
	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	ctx->deadlock_inject_interval = 1;
	/* Stamp-derived countdown spreads injected -EDEADLKs across contexts. */
	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
||
142 | |||
/**
 * ww_acquire_done - marks the end of the acquire phase
 * @ctx: the acquire context
 *
 * Marks the end of the acquire phase, any further w/w mutex lock calls using
 * this context are forbidden.
 *
 * Calling this function is optional, it is just useful to document w/w mutex
 * code and clearly designated the acquire phase from actually using the locked
 * data structures.
 */
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/* The acquire phase must still be open (see ww_acquire_init()). */
	lockdep_assert_held(ctx);

	/* Calling ww_acquire_done() twice on one context is a bug. */
	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
	ctx->done_acquire = 1;
#endif
}
||
163 | |||
/**
 * ww_acquire_fini - releases a w/w acquire context
 * @ctx: the acquire context to free
 *
 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
 * mutexes have been released with ww_mutex_unlock.
 */
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * ctx->dep_map only exists under CONFIG_DEBUG_LOCK_ALLOC (see struct
	 * ww_acquire_ctx), so the release must be guarded by that option and
	 * not by CONFIG_DEBUG_MUTEXES as before — otherwise this failed to
	 * build with DEBUG_MUTEXES=y and DEBUG_LOCK_ALLOC=n.
	 */
	mutex_release(&ctx->dep_map, 0, _THIS_IP_);
#endif
#ifdef CONFIG_DEBUG_MUTEXES
	/* All mutexes acquired with this context must already be unlocked. */
	DEBUG_LOCKS_WARN_ON(ctx->acquired);
	if (!config_enabled(CONFIG_PROVE_LOCKING))
		/*
		 * lockdep will normally handle this,
		 * but fail without anyway
		 */
		ctx->done_acquire = 1;

	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
		/* ensure ww_acquire_fini will still fail if called twice */
		ctx->acquired = ~0U;
#endif
}
||
189 | |||
/* Out-of-line slowpaths; callers use the ww_mutex_lock*() wrappers below. */
extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
						      struct ww_acquire_ctx *ctx);
194 | |||
195 | /** |
||
196 | * ww_mutex_lock - acquire the w/w mutex |
||
197 | * @lock: the mutex to be acquired |
||
198 | * @ctx: w/w acquire context, or NULL to acquire only a single lock. |
||
199 | * |
||
200 | * Lock the w/w mutex exclusively for this task. |
||
201 | * |
||
202 | * Deadlocks within a given w/w class of locks are detected and handled with the |
||
203 | * wait/wound algorithm. If the lock isn't immediately avaiable this function |
||
204 | * will either sleep until it is (wait case). Or it selects the current context |
||
205 | * for backing off by returning -EDEADLK (wound case). Trying to acquire the |
||
206 | * same lock with the same context twice is also detected and signalled by |
||
207 | * returning -EALREADY. Returns 0 if the mutex was successfully acquired. |
||
208 | * |
||
209 | * In the wound case the caller must release all currently held w/w mutexes for |
||
210 | * the given context and then wait for this contending lock to be available by |
||
211 | * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this |
||
212 | * lock and proceed with trying to acquire further w/w mutexes (e.g. when |
||
213 | * scanning through lru lists trying to free resources). |
||
214 | * |
||
215 | * The mutex must later on be released by the same task that |
||
216 | * acquired it. The task may not exit without first unlocking the mutex. Also, |
||
217 | * kernel memory where the mutex resides must not be freed with the mutex still |
||
218 | * locked. The mutex must first be initialized (or statically defined) before it |
||
219 | * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be |
||
220 | * of the same w/w lock class as was used to initialize the acquire context. |
||
221 | * |
||
222 | * A mutex acquired with this function must be released with ww_mutex_unlock. |
||
223 | */ |
||
224 | static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
||
225 | { |
||
226 | if (ctx) |
||
227 | return __ww_mutex_lock(lock, ctx); |
||
228 | |||
229 | mutex_lock(&lock->base); |
||
230 | return 0; |
||
231 | } |
||
232 | |||
233 | /** |
||
234 | * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible |
||
235 | * @lock: the mutex to be acquired |
||
236 | * @ctx: w/w acquire context |
||
237 | * |
||
238 | * Lock the w/w mutex exclusively for this task. |
||
239 | * |
||
240 | * Deadlocks within a given w/w class of locks are detected and handled with the |
||
241 | * wait/wound algorithm. If the lock isn't immediately avaiable this function |
||
242 | * will either sleep until it is (wait case). Or it selects the current context |
||
243 | * for backing off by returning -EDEADLK (wound case). Trying to acquire the |
||
244 | * same lock with the same context twice is also detected and signalled by |
||
245 | * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a |
||
246 | * signal arrives while waiting for the lock then this function returns -EINTR. |
||
247 | * |
||
248 | * In the wound case the caller must release all currently held w/w mutexes for |
||
249 | * the given context and then wait for this contending lock to be available by |
||
250 | * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to |
||
251 | * not acquire this lock and proceed with trying to acquire further w/w mutexes |
||
252 | * (e.g. when scanning through lru lists trying to free resources). |
||
253 | * |
||
254 | * The mutex must later on be released by the same task that |
||
255 | * acquired it. The task may not exit without first unlocking the mutex. Also, |
||
256 | * kernel memory where the mutex resides must not be freed with the mutex still |
||
257 | * locked. The mutex must first be initialized (or statically defined) before it |
||
258 | * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be |
||
259 | * of the same w/w lock class as was used to initialize the acquire context. |
||
260 | * |
||
261 | * A mutex acquired with this function must be released with ww_mutex_unlock. |
||
262 | */ |
||
263 | static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, |
||
264 | struct ww_acquire_ctx *ctx) |
||
265 | { |
||
266 | if (ctx) |
||
267 | return __ww_mutex_lock_interruptible(lock, ctx); |
||
268 | else |
||
269 | return mutex_lock_interruptible(&lock->base); |
||
270 | } |
||
271 | |||
/**
 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Acquires a w/w mutex with the given context after a wound case. This
 * function will sleep until the lock becomes available.
 *
 * The caller must have released all w/w mutexes already acquired with the
 * context and then call this function on the contended lock.
 *
 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
 * needs with ww_mutex_lock. Note that the -EALREADY return code from
 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
 *
 * It is forbidden to call this function with any other w/w mutexes associated
 * with the context held. It is forbidden to call this on anything else than
 * the contending mutex.
 *
 * Note that the slowpath lock acquiring can also be done by calling
 * ww_mutex_lock directly. This function here is simply to help w/w mutex
 * locking code readability by clearly denoting the slowpath.
 */
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int err;

#ifdef CONFIG_DEBUG_MUTEXES
	/* Only legal on the lock that wounded this context (-EDEADLK). */
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	err = ww_mutex_lock(lock, ctx);
	/*
	 * Result deliberately ignored: the caller has dropped every other
	 * lock of this context, so a further back-off is not expected here.
	 */
	(void)err;
}
||
305 | |||
306 | /** |
||
307 | * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible |
||
308 | * @lock: the mutex to be acquired |
||
309 | * @ctx: w/w acquire context |
||
310 | * |
||
311 | * Acquires a w/w mutex with the given context after a wound case. This function |
||
312 | * will sleep until the lock becomes available and returns 0 when the lock has |
||
313 | * been acquired. If a signal arrives while waiting for the lock then this |
||
314 | * function returns -EINTR. |
||
315 | * |
||
316 | * The caller must have released all w/w mutexes already acquired with the |
||
317 | * context and then call this function on the contended lock. |
||
318 | * |
||
319 | * Afterwards the caller may continue to (re)acquire the other w/w mutexes it |
||
320 | * needs with ww_mutex_lock. Note that the -EALREADY return code from |
||
321 | * ww_mutex_lock can be used to avoid locking this contended mutex twice. |
||
322 | * |
||
323 | * It is forbidden to call this function with any other w/w mutexes associated |
||
324 | * with the given context held. It is forbidden to call this on anything else |
||
325 | * than the contending mutex. |
||
326 | * |
||
327 | * Note that the slowpath lock acquiring can also be done by calling |
||
328 | * ww_mutex_lock_interruptible directly. This function here is simply to help |
||
329 | * w/w mutex locking code readability by clearly denoting the slowpath. |
||
330 | */ |
||
331 | static inline int __must_check |
||
332 | ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, |
||
333 | struct ww_acquire_ctx *ctx) |
||
334 | { |
||
335 | #ifdef CONFIG_DEBUG_MUTEXES |
||
336 | DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); |
||
337 | #endif |
||
338 | return ww_mutex_lock_interruptible(lock, ctx); |
||
339 | } |
||
340 | |||
341 | extern void ww_mutex_unlock(struct ww_mutex *lock); |
||
342 | |||
343 | /** |
||
344 | * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context |
||
345 | * @lock: mutex to lock |
||
346 | * |
||
347 | * Trylocks a mutex without acquire context, so no deadlock detection is |
||
348 | * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. |
||
349 | */ |
||
350 | static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) |
||
351 | { |
||
352 | return mutex_trylock(&lock->base); |
||
353 | } |
||
354 | |||
/**
 * ww_mutex_destroy - mark a w/w mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
	mutex_destroy(&lock->base);
}
||
367 | |||
368 | /** |
||
369 | * ww_mutex_is_locked - is the w/w mutex locked |
||
370 | * @lock: the mutex to be queried |
||
371 | * |
||
372 | * Returns 1 if the mutex is locked, 0 if unlocked. |
||
373 | */ |
||
374 | static inline bool ww_mutex_is_locked(struct ww_mutex *lock) |
||
375 | { |
||
376 | return mutex_is_locked(&lock->base); |
||
377 | } |
||
378 | |||
379 | #endif |