Subversion Repositories Kolibri OS

Rev

Rev 5056 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 5056 Rev 5270
1
/*
1
/*
2
 * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
2
 * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
3
 *
3
 *
4
 * Original mutex implementation started by Ingo Molnar:
4
 * Original mutex implementation started by Ingo Molnar:
5
 *
5
 *
6
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar 
6
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar 
7
 *
7
 *
8
 * Wound/wait implementation:
8
 * Wound/wait implementation:
9
 *  Copyright (C) 2013 Canonical Ltd.
9
 *  Copyright (C) 2013 Canonical Ltd.
10
 *
10
 *
11
 * This file contains the main data structure and API definitions.
11
 * This file contains the main data structure and API definitions.
12
 */
12
 */
13
 
13
 
14
#ifndef __LINUX_WW_MUTEX_H
14
#ifndef __LINUX_WW_MUTEX_H
15
#define __LINUX_WW_MUTEX_H
15
#define __LINUX_WW_MUTEX_H
16
 
16
 
17
#include 
17
#include 
18
#include 
18
#include 
19
 
-
 
20
#define current (void*)GetPid()
-
 
21
 
19
 
22
/* A wait/wound lock class: all w/w mutexes that may be acquired together
 * under one acquire context must belong to the same class. */
struct ww_class {
	/* Per-class counter used to stamp acquire contexts
	 * (incremented in ww_acquire_init). */
	atomic_long_t stamp;
	/* lockdep keys and names for the acquire contexts and for the
	 * mutexes of this class. */
	struct lock_class_key acquire_key;
	struct lock_class_key mutex_key;
	const char *acquire_name;
	const char *mutex_name;
};
29
 
27
 
30
/* Per-task acquire context used to take several w/w mutexes of one class
 * with deadlock avoidance; normally lives on the caller's stack. */
struct ww_acquire_ctx {
	/* Owning task; on KolibriOS this holds a PID cast to a pointer
	 * (see the "current" define above). */
	struct task_struct *task;
	/* Class-wide stamp taken at ww_acquire_init time, used for the
	 * wait/wound arbitration. */
	unsigned long stamp;
	/* Number of w/w mutexes currently held through this context. */
	unsigned acquired;
#ifdef CONFIG_DEBUG_MUTEXES
	unsigned done_acquire;		/* set once ww_acquire_done was called */
	struct ww_class *ww_class;	/* class this context was initialized for */
	/* Contended lock recorded for the ww_mutex_lock_slow* re-acquire
	 * path (checked via DEBUG_LOCKS_WARN_ON there). */
	struct ww_mutex *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	/* Deadlock-injection knobs for exercising the -EDEADLK backoff path. */
	unsigned deadlock_inject_interval;
	unsigned deadlock_inject_countdown;
#endif
};
47
 
45
 
48
/* A wait/wound mutex: a plain mutex plus the acquire context (if any)
 * it is currently held under. */
struct ww_mutex {
	struct mutex base;		/* underlying plain mutex */
	struct ww_acquire_ctx *ctx;	/* context of the current holder, or NULL */
#ifdef CONFIG_DEBUG_MUTEXES
	struct ww_class *ww_class;	/* class set in ww_mutex_init */
#endif
};
55
 
53
 
56
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Appends the owning class to a ww_mutex initializer in debug builds. */
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
		, .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif

/* Static initializer for a struct ww_class: stamp counter starts at zero,
 * lockdep names are derived from the class identifier. */
#define __WW_CLASS_INITIALIZER(ww_class) \
		{ .stamp = ATOMIC_LONG_INIT(0) \
		, .acquire_name = #ww_class "_acquire" \
		, .mutex_name = #ww_class "_mutex" }

/*
 * Static initializer for a struct ww_mutex.
 *
 * NOTE(review): the original text read "{ .base = { \__MUTEX_INITIALIZER(...",
 * where the mid-line backslash is not a valid preprocessing token (almost
 * certainly extraction garbling of a line continuation); it has been dropped.
 * Upstream Linux spells this __MUTEX_INITIALIZER(lockname.base) without the
 * inner braces -- confirm against the local mutex.h definition.
 */
#define __WW_MUTEX_INITIALIZER(lockname, class) \
		{ .base = { __MUTEX_INITIALIZER(lockname) } \
		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }

/* Defines and statically initializes a w/w class. */
#define DEFINE_WW_CLASS(classname) \
	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)

/* Defines and statically initializes a w/w mutex of the given class. */
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
77
 
75
 
78
/**
 * ww_mutex_init - initialize the w/w mutex
 * @lock: the mutex to be initialized
 * @ww_class: the w/w class the mutex should belong to
 *
 * Initialize the w/w mutex to unlocked state and associate it with the given
 * class.
 *
 * It is not allowed to initialize an already locked mutex.
 */
static inline void ww_mutex_init(struct ww_mutex *lock,
				 struct ww_class *ww_class)
{
    /* KolibriOS primitive replacing the kernel's __mutex_init. */
    MutexInit(&lock->base);
	/* No holder yet, so no associated acquire context. */
	lock->ctx = NULL;
#ifdef CONFIG_DEBUG_MUTEXES
	lock->ww_class = ww_class;
#endif
}
97
 
95
 
98
/**
 * ww_acquire_init - initialize a w/w acquire context
 * @ctx: w/w acquire context to initialize
 * @ww_class: w/w class of the context
 *
 * Initializes a context to acquire multiple mutexes of the given w/w class.
 *
 * Context-based w/w mutex acquiring can be done in any order whatsoever within
 * a given lock class. Deadlocks will be detected and handled with the
 * wait/wound logic.
 *
 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
 * result in undetected deadlocks and is so forbidden. Mixing different contexts
 * for the same w/w class when acquiring mutexes can also result in undetected
 * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
 * enabling CONFIG_PROVE_LOCKING.
 *
 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
 * to the usual locking rules between different lock classes.
 *
 * An acquire context must be released with ww_acquire_fini by the same task
 * before the memory is freed. It is recommended to allocate the context itself
 * on the stack.
 */
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
				   struct ww_class *ww_class)
{
    /* "current" expands to the caller's PID cast to a pointer (see above). */
    ctx->task = current;
	/* Take the next stamp from the class-wide counter; the stamp orders
	 * contexts for the wait/wound arbitration. */
	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
	ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
	ctx->ww_class = ww_class;
	ctx->done_acquire = 0;
	ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
			 &ww_class->acquire_key, 0);
	/* Tell lockdep the acquire phase has begun. */
	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	ctx->deadlock_inject_interval = 1;
	/* Stagger injected deadlocks across contexts via the stamp's low bits. */
	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
144
 
142
 
145
/**
 * ww_acquire_done - marks the end of the acquire phase
 * @ctx: the acquire context
 *
 * Marks the end of the acquire phase, any further w/w mutex lock calls using
 * this context are forbidden.
 *
 * Calling this function is optional, it is just useful to document w/w mutex
 * code and clearly designates the acquire phase from actually using the locked
 * data structures.
 */
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	lockdep_assert_held(ctx);

	/* Calling this twice on the same context indicates misuse. */
	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
	ctx->done_acquire = 1;
#endif
}
165
 
163
 
166
/**
 * ww_acquire_fini - releases a w/w acquire context
 * @ctx: the acquire context to free
 *
 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
 * mutexes have been released with ww_mutex_unlock.
 */
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	mutex_release(&ctx->dep_map, 0, _THIS_IP_);

	/* All mutexes taken under this context must already be unlocked. */
	DEBUG_LOCKS_WARN_ON(ctx->acquired);
	if (!config_enabled(CONFIG_PROVE_LOCKING))
		/*
		 * lockdep will normally handle this,
		 * but fail without anyway
		 */
		ctx->done_acquire = 1;

	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
		/* ensure ww_acquire_fini will still fail if called twice */
		ctx->acquired = ~0U;
#endif
}
191
 
189
 
192
/* Context-based slowpath implementations, defined out of line; callers
 * normally go through the ww_mutex_lock*() wrappers below, which fall back
 * to a plain mutex when no acquire context is supplied. */
extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
						      struct ww_acquire_ctx *ctx);
196
 
194
 
197
/**
195
/**
198
 * ww_mutex_lock - acquire the w/w mutex
196
 * ww_mutex_lock - acquire the w/w mutex
199
 * @lock: the mutex to be acquired
197
 * @lock: the mutex to be acquired
200
 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
198
 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
201
 *
199
 *
202
 * Lock the w/w mutex exclusively for this task.
200
 * Lock the w/w mutex exclusively for this task.
203
 *
201
 *
204
 * Deadlocks within a given w/w class of locks are detected and handled with the
202
 * Deadlocks within a given w/w class of locks are detected and handled with the
205
 * wait/wound algorithm. If the lock isn't immediately avaiable this function
203
 * wait/wound algorithm. If the lock isn't immediately avaiable this function
206
 * will either sleep until it is (wait case). Or it selects the current context
204
 * will either sleep until it is (wait case). Or it selects the current context
207
 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
205
 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
208
 * same lock with the same context twice is also detected and signalled by
206
 * same lock with the same context twice is also detected and signalled by
209
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
207
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
210
 *
208
 *
211
 * In the wound case the caller must release all currently held w/w mutexes for
209
 * In the wound case the caller must release all currently held w/w mutexes for
212
 * the given context and then wait for this contending lock to be available by
210
 * the given context and then wait for this contending lock to be available by
213
 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
211
 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
214
 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
212
 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
215
 * scanning through lru lists trying to free resources).
213
 * scanning through lru lists trying to free resources).
216
 *
214
 *
217
 * The mutex must later on be released by the same task that
215
 * The mutex must later on be released by the same task that
218
 * acquired it. The task may not exit without first unlocking the mutex. Also,
216
 * acquired it. The task may not exit without first unlocking the mutex. Also,
219
 * kernel memory where the mutex resides must not be freed with the mutex still
217
 * kernel memory where the mutex resides must not be freed with the mutex still
220
 * locked. The mutex must first be initialized (or statically defined) before it
218
 * locked. The mutex must first be initialized (or statically defined) before it
221
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
219
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
222
 * of the same w/w lock class as was used to initialize the acquire context.
220
 * of the same w/w lock class as was used to initialize the acquire context.
223
 *
221
 *
224
 * A mutex acquired with this function must be released with ww_mutex_unlock.
222
 * A mutex acquired with this function must be released with ww_mutex_unlock.
225
 */
223
 */
226
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
224
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
227
{
225
{
228
	if (ctx)
226
	if (ctx)
229
		return __ww_mutex_lock(lock, ctx);
227
		return __ww_mutex_lock(lock, ctx);
230
 
228
 
231
	mutex_lock(&lock->base);
229
	mutex_lock(&lock->base);
232
	return 0;
230
	return 0;
233
}
231
}
234
 
232
 
235
/**
233
/**
236
 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
234
 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
237
 * @lock: the mutex to be acquired
235
 * @lock: the mutex to be acquired
238
 * @ctx: w/w acquire context
236
 * @ctx: w/w acquire context
239
 *
237
 *
240
 * Lock the w/w mutex exclusively for this task.
238
 * Lock the w/w mutex exclusively for this task.
241
 *
239
 *
242
 * Deadlocks within a given w/w class of locks are detected and handled with the
240
 * Deadlocks within a given w/w class of locks are detected and handled with the
243
 * wait/wound algorithm. If the lock isn't immediately avaiable this function
241
 * wait/wound algorithm. If the lock isn't immediately avaiable this function
244
 * will either sleep until it is (wait case). Or it selects the current context
242
 * will either sleep until it is (wait case). Or it selects the current context
245
 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
243
 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
246
 * same lock with the same context twice is also detected and signalled by
244
 * same lock with the same context twice is also detected and signalled by
247
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
245
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
248
 * signal arrives while waiting for the lock then this function returns -EINTR.
246
 * signal arrives while waiting for the lock then this function returns -EINTR.
249
 *
247
 *
250
 * In the wound case the caller must release all currently held w/w mutexes for
248
 * In the wound case the caller must release all currently held w/w mutexes for
251
 * the given context and then wait for this contending lock to be available by
249
 * the given context and then wait for this contending lock to be available by
252
 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
250
 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
253
 * not acquire this lock and proceed with trying to acquire further w/w mutexes
251
 * not acquire this lock and proceed with trying to acquire further w/w mutexes
254
 * (e.g. when scanning through lru lists trying to free resources).
252
 * (e.g. when scanning through lru lists trying to free resources).
255
 *
253
 *
256
 * The mutex must later on be released by the same task that
254
 * The mutex must later on be released by the same task that
257
 * acquired it. The task may not exit without first unlocking the mutex. Also,
255
 * acquired it. The task may not exit without first unlocking the mutex. Also,
258
 * kernel memory where the mutex resides must not be freed with the mutex still
256
 * kernel memory where the mutex resides must not be freed with the mutex still
259
 * locked. The mutex must first be initialized (or statically defined) before it
257
 * locked. The mutex must first be initialized (or statically defined) before it
260
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
258
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
261
 * of the same w/w lock class as was used to initialize the acquire context.
259
 * of the same w/w lock class as was used to initialize the acquire context.
262
 *
260
 *
263
 * A mutex acquired with this function must be released with ww_mutex_unlock.
261
 * A mutex acquired with this function must be released with ww_mutex_unlock.
264
 */
262
 */
265
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
263
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
266
							   struct ww_acquire_ctx *ctx)
264
							   struct ww_acquire_ctx *ctx)
267
{
265
{
268
	if (ctx)
266
	if (ctx)
269
		return __ww_mutex_lock_interruptible(lock, ctx);
267
		return __ww_mutex_lock_interruptible(lock, ctx);
270
	else
268
	else
271
		return mutex_lock_interruptible(&lock->base);
269
		return mutex_lock_interruptible(&lock->base);
272
}
270
}
273
 
271
 
274
/**
 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Acquires a w/w mutex with the given context after a wound case. This function
 * will sleep until the lock becomes available.
 *
 * The caller must have released all w/w mutexes already acquired with the
 * context and then call this function on the contended lock.
 *
 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
 * needs with ww_mutex_lock. Note that the -EALREADY return code from
 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
 *
 * It is forbidden to call this function with any other w/w mutexes associated
 * with the context held. It is forbidden to call this on anything else than the
 * contending mutex.
 *
 * Note that the slowpath lock acquiring can also be done by calling
 * ww_mutex_lock directly. This function here is simply to help w/w mutex
 * locking code readability by clearly denoting the slowpath.
 */
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/* The caller must have been wounded on this lock beforehand. */
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	/* The return value is deliberately discarded: with all other locks of
	 * the context dropped, this call simply waits for the lock. */
	(void)ww_mutex_lock(lock, ctx);
}
307
 
305
 
308
/**
 * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Acquires a w/w mutex with the given context after a wound case. This function
 * will sleep until the lock becomes available and returns 0 when the lock has
 * been acquired. If a signal arrives while waiting for the lock then this
 * function returns -EINTR.
 *
 * The caller must have released all w/w mutexes already acquired with the
 * context and then call this function on the contended lock.
 *
 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
 * needs with ww_mutex_lock. Note that the -EALREADY return code from
 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
 *
 * It is forbidden to call this function with any other w/w mutexes associated
 * with the given context held. It is forbidden to call this on anything else
 * than the contending mutex.
 *
 * Note that the slowpath lock acquiring can also be done by calling
 * ww_mutex_lock_interruptible directly. This function here is simply to help
 * w/w mutex locking code readability by clearly denoting the slowpath.
 */
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
				 struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/* The caller must have been wounded on this lock beforehand. */
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	return ww_mutex_lock_interruptible(lock, ctx);
}
342
 
340
 
343
/* Releases a w/w mutex taken via any of the lock functions above;
 * implemented out of line. */
extern void ww_mutex_unlock(struct ww_mutex *lock);
344
 
342
 
345
/**
 * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
 * @lock: mutex to lock
 *
 * Trylocks a mutex without acquire context, so no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 */
static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
{
	/* Thin wrapper: delegates straight to the base mutex. */
	return mutex_trylock(&lock->base);
}
356
 
354
 
357
/**
 * ww_mutex_destroy - mark a w/w mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
	mutex_destroy(&lock->base);
}
369
 
367
 
370
/**
 * ww_mutex_is_locked - is the w/w mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
	/* The w/w mutex is locked exactly when its base mutex is. */
	return mutex_is_locked(&lock->base);
}
380
 
378
 
381
#endif /* __LINUX_WW_MUTEX_H */