Subversion Repositories Kolibri OS

Rev

Rev 5271 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 5271 Rev 6084
1
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23
 
23
 
24
/* NOTE(review): header names were stripped by the diff-page extraction;
 * these are the includes of upstream drm_modeset_lock.c — confirm against
 * the repository copy.
 */
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>
27
 
27
 
28
/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine grained locking, and atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use ww_mutex and acquire-contexts to avoid deadlocks.  But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx.  This is provided
 * by drm_modeset_lock / drm_modeset_acquire_ctx.
 *
 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
 *
 * The basic usage pattern is to:
 *
 *     drm_modeset_acquire_init(&ctx)
 *   retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *       ret = drm_modeset_lock(lock, &ctx)
 *       if (ret == -EDEADLK) {
 *          drm_modeset_backoff(&ctx);
 *          goto retry;
 *       }
 *     }
 *
 *     ... do stuff ...
 *
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */
57
 
-
 
58
 
57
 
59
/**
58
/**
60
 * __drm_modeset_lock_all - internal helper to grab all modeset locks
59
 * drm_modeset_lock_all - take all modeset locks
61
 * @dev: DRM device
-
 
62
 * @trylock: trylock mode for atomic contexts
-
 
63
 *
-
 
64
 * This is a special version of drm_modeset_lock_all() which can also be used in
-
 
65
 * atomic contexts. Then @trylock must be set to true.
60
 * @dev: drm device
66
 *
61
 *
67
 * Returns:
62
 * This function takes all modeset locks, suitable where a more fine-grained
-
 
63
 * scheme isn't (yet) implemented. Locks must be dropped with
68
 * 0 on success or negative error code on failure.
64
 * drm_modeset_unlock_all.
69
 */
65
 */
70
int __drm_modeset_lock_all(struct drm_device *dev,
-
 
71
			   bool trylock)
66
void drm_modeset_lock_all(struct drm_device *dev)
72
{
67
{
73
	struct drm_mode_config *config = &dev->mode_config;
68
	struct drm_mode_config *config = &dev->mode_config;
74
	struct drm_modeset_acquire_ctx *ctx;
69
	struct drm_modeset_acquire_ctx *ctx;
75
	int ret;
70
	int ret;
76
 
71
 
77
	ctx = kzalloc(sizeof(*ctx),
-
 
78
		      trylock ? GFP_ATOMIC : GFP_KERNEL);
72
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
79
	if (!ctx)
73
	if (WARN_ON(!ctx))
80
		return -ENOMEM;
-
 
81
 
-
 
82
	if (trylock) {
-
 
83
		if (!mutex_trylock(&config->mutex))
-
 
84
			return -EBUSY;
74
		return;
85
	} else {
-
 
86
		mutex_lock(&config->mutex);
75
 
87
	}
-
 
88
 
76
	mutex_lock(&config->mutex);
89
	drm_modeset_acquire_init(ctx, 0);
77
 
90
	ctx->trylock_only = trylock;
78
	drm_modeset_acquire_init(ctx, 0);
91
 
79
 
92
retry:
80
retry:
93
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
81
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
94
	if (ret)
82
	if (ret)
95
		goto fail;
83
		goto fail;
96
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
84
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
97
	if (ret)
85
	if (ret)
98
		goto fail;
86
		goto fail;
99
 
87
 
100
	WARN_ON(config->acquire_ctx);
88
	WARN_ON(config->acquire_ctx);
101
 
89
 
102
	/* now we hold the locks, so now that it is safe, stash the
90
	/* now we hold the locks, so now that it is safe, stash the
103
	 * ctx for drm_modeset_unlock_all():
91
	 * ctx for drm_modeset_unlock_all():
104
	 */
92
	 */
105
	config->acquire_ctx = ctx;
93
	config->acquire_ctx = ctx;
106
 
94
 
107
	drm_warn_on_modeset_not_all_locked(dev);
95
	drm_warn_on_modeset_not_all_locked(dev);
108
 
96
 
109
	return 0;
97
	return;
110
 
98
 
111
fail:
99
fail:
112
	if (ret == -EDEADLK) {
100
	if (ret == -EDEADLK) {
113
		drm_modeset_backoff(ctx);
101
		drm_modeset_backoff(ctx);
114
		goto retry;
102
		goto retry;
115
	}
103
	}
116
 
104
 
117
	return ret;
-
 
118
}
-
 
119
EXPORT_SYMBOL(__drm_modeset_lock_all);
-
 
120
 
-
 
121
/**
-
 
122
 * drm_modeset_lock_all - take all modeset locks
-
 
123
 * @dev: drm device
-
 
124
 *
-
 
125
 * This function takes all modeset locks, suitable where a more fine-grained
-
 
126
 * scheme isn't (yet) implemented. Locks must be dropped with
-
 
127
 * drm_modeset_unlock_all.
-
 
128
 */
-
 
129
void drm_modeset_lock_all(struct drm_device *dev)
-
 
130
{
-
 
131
	WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
105
	kfree(ctx);
132
}
106
}
133
EXPORT_SYMBOL(drm_modeset_lock_all);
107
EXPORT_SYMBOL(drm_modeset_lock_all);
134
 
108
 
135
/**
109
/**
136
 * drm_modeset_unlock_all - drop all modeset locks
110
 * drm_modeset_unlock_all - drop all modeset locks
137
 * @dev: device
111
 * @dev: device
138
 *
112
 *
139
 * This function drop all modeset locks taken by drm_modeset_lock_all.
113
 * This function drop all modeset locks taken by drm_modeset_lock_all.
140
 */
114
 */
141
void drm_modeset_unlock_all(struct drm_device *dev)
115
void drm_modeset_unlock_all(struct drm_device *dev)
142
{
116
{
143
	struct drm_mode_config *config = &dev->mode_config;
117
	struct drm_mode_config *config = &dev->mode_config;
144
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
118
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
145
 
119
 
146
	if (WARN_ON(!ctx))
120
	if (WARN_ON(!ctx))
147
		return;
121
		return;
148
 
122
 
149
	config->acquire_ctx = NULL;
123
	config->acquire_ctx = NULL;
150
	drm_modeset_drop_locks(ctx);
124
	drm_modeset_drop_locks(ctx);
151
	drm_modeset_acquire_fini(ctx);
125
	drm_modeset_acquire_fini(ctx);
152
 
126
 
153
	kfree(ctx);
127
	kfree(ctx);
154
 
128
 
155
	mutex_unlock(&dev->mode_config.mutex);
129
	mutex_unlock(&dev->mode_config.mutex);
156
}
130
}
157
EXPORT_SYMBOL(drm_modeset_unlock_all);
131
EXPORT_SYMBOL(drm_modeset_unlock_all);
158
 
132
 
159
/**
133
/**
160
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
134
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
161
 * @crtc: DRM CRTC
135
 * @crtc: DRM CRTC
162
 * @plane: DRM plane to be updated on @crtc
136
 * @plane: DRM plane to be updated on @crtc
163
 *
137
 *
164
 * This function locks the given crtc and plane (which should be either the
138
 * This function locks the given crtc and plane (which should be either the
165
 * primary or cursor plane) using a hidden acquire context. This is necessary so
139
 * primary or cursor plane) using a hidden acquire context. This is necessary so
166
 * that drivers internally using the atomic interfaces can grab further locks
140
 * that drivers internally using the atomic interfaces can grab further locks
167
 * with the lock acquire context.
141
 * with the lock acquire context.
168
 *
142
 *
169
 * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
143
 * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
170
 * converted to universal planes yet.
144
 * converted to universal planes yet.
171
 */
145
 */
172
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
146
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
173
			   struct drm_plane *plane)
147
			   struct drm_plane *plane)
174
{
148
{
175
	struct drm_modeset_acquire_ctx *ctx;
149
	struct drm_modeset_acquire_ctx *ctx;
176
	int ret;
150
	int ret;
177
 
151
 
178
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
152
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
179
	if (WARN_ON(!ctx))
153
	if (WARN_ON(!ctx))
180
		return;
154
		return;
181
 
155
 
182
	drm_modeset_acquire_init(ctx, 0);
156
	drm_modeset_acquire_init(ctx, 0);
183
 
157
 
184
retry:
158
retry:
185
	ret = drm_modeset_lock(&crtc->mutex, ctx);
159
	ret = drm_modeset_lock(&crtc->mutex, ctx);
186
	if (ret)
160
	if (ret)
187
		goto fail;
161
		goto fail;
188
 
162
 
189
	if (plane) {
163
	if (plane) {
190
		ret = drm_modeset_lock(&plane->mutex, ctx);
164
		ret = drm_modeset_lock(&plane->mutex, ctx);
191
		if (ret)
165
		if (ret)
192
			goto fail;
166
			goto fail;
193
 
167
 
194
		if (plane->crtc) {
168
		if (plane->crtc) {
195
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
169
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
196
			if (ret)
170
			if (ret)
197
				goto fail;
171
				goto fail;
198
		}
172
		}
199
	}
173
	}
200
 
174
 
201
	WARN_ON(crtc->acquire_ctx);
175
	WARN_ON(crtc->acquire_ctx);
202
 
176
 
203
	/* now we hold the locks, so now that it is safe, stash the
177
	/* now we hold the locks, so now that it is safe, stash the
204
	 * ctx for drm_modeset_unlock_crtc():
178
	 * ctx for drm_modeset_unlock_crtc():
205
	 */
179
	 */
206
	crtc->acquire_ctx = ctx;
180
	crtc->acquire_ctx = ctx;
207
 
181
 
208
	return;
182
	return;
209
 
183
 
210
fail:
184
fail:
211
	if (ret == -EDEADLK) {
185
	if (ret == -EDEADLK) {
212
		drm_modeset_backoff(ctx);
186
		drm_modeset_backoff(ctx);
213
		goto retry;
187
		goto retry;
214
	}
188
	}
215
}
189
}
216
EXPORT_SYMBOL(drm_modeset_lock_crtc);
190
EXPORT_SYMBOL(drm_modeset_lock_crtc);
217
 
191
 
218
/**
192
/**
219
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
193
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
220
 * @crtc: drm crtc
194
 * @crtc: drm crtc
221
 *
195
 *
222
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
196
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
223
 * locking, and store the acquire ctx in the corresponding crtc. All other
197
 * locking, and store the acquire ctx in the corresponding crtc. All other
224
 * legacy operations take all locks and use a global acquire context. This
198
 * legacy operations take all locks and use a global acquire context. This
225
 * function grabs the right one.
199
 * function grabs the right one.
226
 */
200
 */
227
struct drm_modeset_acquire_ctx *
201
struct drm_modeset_acquire_ctx *
228
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
202
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
229
{
203
{
230
	if (crtc->acquire_ctx)
204
	if (crtc->acquire_ctx)
231
		return crtc->acquire_ctx;
205
		return crtc->acquire_ctx;
232
 
206
 
233
	WARN_ON(!crtc->dev->mode_config.acquire_ctx);
207
	WARN_ON(!crtc->dev->mode_config.acquire_ctx);
234
 
208
 
235
	return crtc->dev->mode_config.acquire_ctx;
209
	return crtc->dev->mode_config.acquire_ctx;
236
}
210
}
237
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
211
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
238
 
212
 
239
/**
213
/**
240
 * drm_modeset_unlock_crtc - drop crtc lock
214
 * drm_modeset_unlock_crtc - drop crtc lock
241
 * @crtc: drm crtc
215
 * @crtc: drm crtc
242
 *
216
 *
243
 * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
217
 * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
244
 * locks acquired through the hidden context.
218
 * locks acquired through the hidden context.
245
 */
219
 */
246
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
220
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
247
{
221
{
248
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
222
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
249
 
223
 
250
	if (WARN_ON(!ctx))
224
	if (WARN_ON(!ctx))
251
		return;
225
		return;
252
 
226
 
253
	crtc->acquire_ctx = NULL;
227
	crtc->acquire_ctx = NULL;
254
	drm_modeset_drop_locks(ctx);
228
	drm_modeset_drop_locks(ctx);
255
	drm_modeset_acquire_fini(ctx);
229
	drm_modeset_acquire_fini(ctx);
256
 
230
 
257
	kfree(ctx);
231
	kfree(ctx);
258
}
232
}
259
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
233
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
260
 
234
 
261
/**
235
/**
262
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
236
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
263
 * @dev: device
237
 * @dev: device
264
 *
238
 *
265
 * Useful as a debug assert.
239
 * Useful as a debug assert.
266
 */
240
 */
267
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
241
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
268
{
242
{
269
	struct drm_crtc *crtc;
243
	struct drm_crtc *crtc;
270
 
244
 
271
	/* Locking is currently fubar in the panic handler. */
245
	/* Locking is currently fubar in the panic handler. */
272
//   if (oops_in_progress)
246
	if (oops_in_progress)
273
//       return;
247
		return;
274
 
248
 
275
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
249
	drm_for_each_crtc(crtc, dev)
276
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
250
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
277
 
251
 
278
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
252
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
279
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
253
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
280
}
254
}
281
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
255
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
282
 
256
 
283
/**
257
/**
284
 * drm_modeset_acquire_init - initialize acquire context
258
 * drm_modeset_acquire_init - initialize acquire context
285
 * @ctx: the acquire context
259
 * @ctx: the acquire context
286
 * @flags: for future
260
 * @flags: for future
287
 */
261
 */
288
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
262
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
289
		uint32_t flags)
263
		uint32_t flags)
290
{
264
{
291
	memset(ctx, 0, sizeof(*ctx));
265
	memset(ctx, 0, sizeof(*ctx));
292
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
266
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
293
	INIT_LIST_HEAD(&ctx->locked);
267
	INIT_LIST_HEAD(&ctx->locked);
294
}
268
}
295
EXPORT_SYMBOL(drm_modeset_acquire_init);
269
EXPORT_SYMBOL(drm_modeset_acquire_init);
296
 
270
 
297
/**
271
/**
298
 * drm_modeset_acquire_fini - cleanup acquire context
272
 * drm_modeset_acquire_fini - cleanup acquire context
299
 * @ctx: the acquire context
273
 * @ctx: the acquire context
300
 */
274
 */
301
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
275
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
302
{
276
{
303
	ww_acquire_fini(&ctx->ww_ctx);
277
	ww_acquire_fini(&ctx->ww_ctx);
304
}
278
}
305
EXPORT_SYMBOL(drm_modeset_acquire_fini);
279
EXPORT_SYMBOL(drm_modeset_acquire_fini);
306
 
280
 
307
/**
281
/**
308
 * drm_modeset_drop_locks - drop all locks
282
 * drm_modeset_drop_locks - drop all locks
309
 * @ctx: the acquire context
283
 * @ctx: the acquire context
310
 *
284
 *
311
 * Drop all locks currently held against this acquire context.
285
 * Drop all locks currently held against this acquire context.
312
 */
286
 */
313
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
287
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
314
{
288
{
315
	WARN_ON(ctx->contended);
289
	WARN_ON(ctx->contended);
316
	while (!list_empty(&ctx->locked)) {
290
	while (!list_empty(&ctx->locked)) {
317
		struct drm_modeset_lock *lock;
291
		struct drm_modeset_lock *lock;
318
 
292
 
319
		lock = list_first_entry(&ctx->locked,
293
		lock = list_first_entry(&ctx->locked,
320
				struct drm_modeset_lock, head);
294
				struct drm_modeset_lock, head);
321
 
295
 
322
		drm_modeset_unlock(lock);
296
		drm_modeset_unlock(lock);
323
	}
297
	}
324
}
298
}
325
EXPORT_SYMBOL(drm_modeset_drop_locks);
299
EXPORT_SYMBOL(drm_modeset_drop_locks);
326
 
300
 
327
/*
 * Common lock helper: acquires @lock under @ctx in the requested mode
 * (trylock-only, interruptible and/or ww-mutex slowpath) and tracks
 * successfully taken locks on ctx->locked so they can be dropped later.
 */
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}
366
 
342
 
367
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
343
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
368
		bool interruptible)
344
		bool interruptible)
369
{
345
{
370
	struct drm_modeset_lock *contended = ctx->contended;
346
	struct drm_modeset_lock *contended = ctx->contended;
371
 
347
 
372
	ctx->contended = NULL;
348
	ctx->contended = NULL;
373
 
349
 
374
	if (WARN_ON(!contended))
350
	if (WARN_ON(!contended))
375
		return 0;
351
		return 0;
376
 
352
 
377
	drm_modeset_drop_locks(ctx);
353
	drm_modeset_drop_locks(ctx);
378
 
354
 
379
	return modeset_lock(contended, ctx, interruptible, true);
355
	return modeset_lock(contended, ctx, interruptible, true);
380
}
356
}
381
 
357
 
382
/**
358
/**
383
 * drm_modeset_backoff - deadlock avoidance backoff
359
 * drm_modeset_backoff - deadlock avoidance backoff
384
 * @ctx: the acquire context
360
 * @ctx: the acquire context
385
 *
361
 *
386
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
362
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
387
 * you must call this function to drop all currently held locks and
363
 * you must call this function to drop all currently held locks and
388
 * block until the contended lock becomes available.
364
 * block until the contended lock becomes available.
389
 */
365
 */
390
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
366
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
391
{
367
{
392
	modeset_backoff(ctx, false);
368
	modeset_backoff(ctx, false);
393
}
369
}
394
EXPORT_SYMBOL(drm_modeset_backoff);
370
EXPORT_SYMBOL(drm_modeset_backoff);
395
 
371
 
396
/**
372
/**
397
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
373
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
398
 * @ctx: the acquire context
374
 * @ctx: the acquire context
399
 *
375
 *
400
 * Interruptible version of drm_modeset_backoff()
376
 * Interruptible version of drm_modeset_backoff()
401
 */
377
 */
402
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
378
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
403
{
379
{
404
	return modeset_backoff(ctx, true);
380
	return modeset_backoff(ctx, true);
405
}
381
}
406
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
382
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
407
 
383
 
408
/**
384
/**
409
 * drm_modeset_lock - take modeset lock
385
 * drm_modeset_lock - take modeset lock
410
 * @lock: lock to take
386
 * @lock: lock to take
411
 * @ctx: acquire ctx
387
 * @ctx: acquire ctx
412
 *
388
 *
413
 * If ctx is not NULL, then its ww acquire context is used and the
389
 * If ctx is not NULL, then its ww acquire context is used and the
414
 * lock will be tracked by the context and can be released by calling
390
 * lock will be tracked by the context and can be released by calling
415
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
391
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
416
 * deadlock scenario has been detected and it is an error to attempt
392
 * deadlock scenario has been detected and it is an error to attempt
417
 * to take any more locks without first calling drm_modeset_backoff().
393
 * to take any more locks without first calling drm_modeset_backoff().
418
 */
394
 */
419
int drm_modeset_lock(struct drm_modeset_lock *lock,
395
int drm_modeset_lock(struct drm_modeset_lock *lock,
420
		struct drm_modeset_acquire_ctx *ctx)
396
		struct drm_modeset_acquire_ctx *ctx)
421
{
397
{
422
	if (ctx)
398
	if (ctx)
423
		return modeset_lock(lock, ctx, false, false);
399
		return modeset_lock(lock, ctx, false, false);
424
 
400
 
425
	ww_mutex_lock(&lock->mutex, NULL);
401
	ww_mutex_lock(&lock->mutex, NULL);
426
	return 0;
402
	return 0;
427
}
403
}
428
EXPORT_SYMBOL(drm_modeset_lock);
404
EXPORT_SYMBOL(drm_modeset_lock);
429
 
405
 
430
/**
406
/**
431
 * drm_modeset_lock_interruptible - take modeset lock
407
 * drm_modeset_lock_interruptible - take modeset lock
432
 * @lock: lock to take
408
 * @lock: lock to take
433
 * @ctx: acquire ctx
409
 * @ctx: acquire ctx
434
 *
410
 *
435
 * Interruptible version of drm_modeset_lock()
411
 * Interruptible version of drm_modeset_lock()
436
 */
412
 */
437
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
413
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
438
		struct drm_modeset_acquire_ctx *ctx)
414
		struct drm_modeset_acquire_ctx *ctx)
439
{
415
{
440
	if (ctx)
416
	if (ctx)
441
		return modeset_lock(lock, ctx, true, false);
417
		return modeset_lock(lock, ctx, true, false);
442
 
418
 
443
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
419
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
444
}
420
}
445
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
421
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
446
 
422
 
447
/**
423
/**
448
 * drm_modeset_unlock - drop modeset lock
424
 * drm_modeset_unlock - drop modeset lock
449
 * @lock: lock to release
425
 * @lock: lock to release
450
 */
426
 */
451
void drm_modeset_unlock(struct drm_modeset_lock *lock)
427
void drm_modeset_unlock(struct drm_modeset_lock *lock)
452
{
428
{
453
	list_del_init(&lock->head);
429
	list_del_init(&lock->head);
454
	ww_mutex_unlock(&lock->mutex);
430
	ww_mutex_unlock(&lock->mutex);
455
}
431
}
456
EXPORT_SYMBOL(drm_modeset_unlock);
432
EXPORT_SYMBOL(drm_modeset_unlock);
457
 
433
 
458
/* In some legacy codepaths it's convenient to just grab all the crtc and plane
434
/* In some legacy codepaths it's convenient to just grab all the crtc and plane
459
 * related locks. */
435
 * related locks. */
460
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
436
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
461
		struct drm_modeset_acquire_ctx *ctx)
437
		struct drm_modeset_acquire_ctx *ctx)
462
{
438
{
463
	struct drm_mode_config *config = &dev->mode_config;
-
 
464
	struct drm_crtc *crtc;
439
	struct drm_crtc *crtc;
465
	struct drm_plane *plane;
440
	struct drm_plane *plane;
466
	int ret = 0;
441
	int ret = 0;
467
 
442
 
468
	list_for_each_entry(crtc, &config->crtc_list, head) {
443
	drm_for_each_crtc(crtc, dev) {
469
		ret = drm_modeset_lock(&crtc->mutex, ctx);
444
		ret = drm_modeset_lock(&crtc->mutex, ctx);
470
		if (ret)
445
		if (ret)
471
			return ret;
446
			return ret;
472
	}
447
	}
473
 
448
 
474
	list_for_each_entry(plane, &config->plane_list, head) {
449
	drm_for_each_plane(plane, dev) {
475
		ret = drm_modeset_lock(&plane->mutex, ctx);
450
		ret = drm_modeset_lock(&plane->mutex, ctx);
476
		if (ret)
451
		if (ret)
477
			return ret;
452
			return ret;
478
	}
453
	}
479
 
454
 
480
	return 0;
455
	return 0;
481
}
456
}
482
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
457
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);