Subversion Repositories Kolibri OS

Rev

Rev 6084 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 6084 Rev 6937
1
/*
1
/*
2
 * Copyright (C) 2014 Red Hat
2
 * Copyright (C) 2014 Red Hat
3
 * Author: Rob Clark 
3
 * Author: Rob Clark 
4
 *
4
 *
5
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * Permission is hereby granted, free of charge, to any person obtaining a
6
 * copy of this software and associated documentation files (the "Software"),
6
 * copy of this software and associated documentation files (the "Software"),
7
 * to deal in the Software without restriction, including without limitation
7
 * to deal in the Software without restriction, including without limitation
8
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * and/or sell copies of the Software, and to permit persons to whom the
10
 * Software is furnished to do so, subject to the following conditions:
10
 * Software is furnished to do so, subject to the following conditions:
11
 *
11
 *
12
 * The above copyright notice and this permission notice shall be included in
12
 * The above copyright notice and this permission notice shall be included in
13
 * all copies or substantial portions of the Software.
13
 * all copies or substantial portions of the Software.
14
 *
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21
 * OTHER DEALINGS IN THE SOFTWARE.
21
 * OTHER DEALINGS IN THE SOFTWARE.
22
 */
22
 */
23
 
23
 
24
#include 
24
#include 
25
#include 
25
#include 
26
#include 
26
#include 
27
 
27
 
28
/**
28
/**
29
 * DOC: kms locking
29
 * DOC: kms locking
30
 *
30
 *
31
 * As KMS moves toward more fine grained locking, and atomic ioctl where
31
 * As KMS moves toward more fine grained locking, and atomic ioctl where
32
 * userspace can indirectly control locking order, it becomes necessary
32
 * userspace can indirectly control locking order, it becomes necessary
33
 * to use ww_mutex and acquire-contexts to avoid deadlocks.  But because
33
 * to use ww_mutex and acquire-contexts to avoid deadlocks.  But because
34
 * the locking is more distributed around the driver code, we want a bit
34
 * the locking is more distributed around the driver code, we want a bit
35
 * of extra utility/tracking out of our acquire-ctx.  This is provided
35
 * of extra utility/tracking out of our acquire-ctx.  This is provided
36
 * by drm_modeset_lock / drm_modeset_acquire_ctx.
36
 * by drm_modeset_lock / drm_modeset_acquire_ctx.
37
 *
37
 *
38
 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
38
 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
39
 *
39
 *
40
 * The basic usage pattern is to:
40
 * The basic usage pattern is to:
41
 *
41
 *
42
 *     drm_modeset_acquire_init(&ctx)
42
 *     drm_modeset_acquire_init(&ctx)
43
 *   retry:
43
 *   retry:
44
 *     foreach (lock in random_ordered_set_of_locks) {
44
 *     foreach (lock in random_ordered_set_of_locks) {
45
 *       ret = drm_modeset_lock(lock, &ctx)
45
 *       ret = drm_modeset_lock(lock, &ctx)
46
 *       if (ret == -EDEADLK) {
46
 *       if (ret == -EDEADLK) {
47
 *          drm_modeset_backoff(&ctx);
47
 *          drm_modeset_backoff(&ctx);
48
 *          goto retry;
48
 *          goto retry;
49
 *       }
49
 *       }
50
 *     }
50
 *     }
51
 *
-
 
52
 *     ... do stuff ...
51
 *     ... do stuff ...
53
 *
-
 
54
 *     drm_modeset_drop_locks(&ctx);
52
 *     drm_modeset_drop_locks(&ctx);
55
 *     drm_modeset_acquire_fini(&ctx);
53
 *     drm_modeset_acquire_fini(&ctx);
56
 */
54
 */
57
 
55
 
58
/**
56
/**
59
 * drm_modeset_lock_all - take all modeset locks
57
 * drm_modeset_lock_all - take all modeset locks
60
 * @dev: drm device
58
 * @dev: DRM device
61
 *
59
 *
62
 * This function takes all modeset locks, suitable where a more fine-grained
60
 * This function takes all modeset locks, suitable where a more fine-grained
63
 * scheme isn't (yet) implemented. Locks must be dropped with
61
 * scheme isn't (yet) implemented. Locks must be dropped by calling the
64
 * drm_modeset_unlock_all.
62
 * drm_modeset_unlock_all() function.
-
 
63
 *
-
 
64
 * This function is deprecated. It allocates a lock acquisition context and
-
 
65
 * stores it in the DRM device's ->mode_config. This facilitate conversion of
-
 
66
 * existing code because it removes the need to manually deal with the
-
 
67
 * acquisition context, but it is also brittle because the context is global
-
 
68
 * and care must be taken not to nest calls. New code should use the
-
 
69
 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
65
 */
70
 */
66
void drm_modeset_lock_all(struct drm_device *dev)
71
void drm_modeset_lock_all(struct drm_device *dev)
67
{
72
{
68
	struct drm_mode_config *config = &dev->mode_config;
73
	struct drm_mode_config *config = &dev->mode_config;
69
	struct drm_modeset_acquire_ctx *ctx;
74
	struct drm_modeset_acquire_ctx *ctx;
70
	int ret;
75
	int ret;
71
 
76
 
72
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
77
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
73
	if (WARN_ON(!ctx))
78
	if (WARN_ON(!ctx))
74
		return;
79
		return;
75
 
80
 
76
	mutex_lock(&config->mutex);
81
	mutex_lock(&config->mutex);
77
 
82
 
78
	drm_modeset_acquire_init(ctx, 0);
83
	drm_modeset_acquire_init(ctx, 0);
79
 
84
 
80
retry:
85
retry:
81
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
86
	ret = drm_modeset_lock_all_ctx(dev, ctx);
82
	if (ret)
87
	if (ret < 0) {
-
 
88
		if (ret == -EDEADLK) {
-
 
89
			drm_modeset_backoff(ctx);
83
		goto fail;
90
			goto retry;
-
 
91
		}
-
 
92
 
84
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
93
		drm_modeset_acquire_fini(ctx);
85
	if (ret)
94
		kfree(ctx);
86
		goto fail;
95
		return;
-
 
96
	}
87
 
97
 
88
	WARN_ON(config->acquire_ctx);
98
	WARN_ON(config->acquire_ctx);
-
 
99
 
89
 
100
	/*
90
	/* now we hold the locks, so now that it is safe, stash the
101
	 * We hold the locks now, so it is safe to stash the acquisition
91
	 * ctx for drm_modeset_unlock_all():
102
	 * context for drm_modeset_unlock_all().
92
	 */
103
	 */
93
	config->acquire_ctx = ctx;
104
	config->acquire_ctx = ctx;
94
 
105
 
95
	drm_warn_on_modeset_not_all_locked(dev);
106
	drm_warn_on_modeset_not_all_locked(dev);
96
 
-
 
97
	return;
-
 
98
 
-
 
99
fail:
-
 
100
	if (ret == -EDEADLK) {
-
 
101
		drm_modeset_backoff(ctx);
-
 
102
		goto retry;
-
 
103
	}
-
 
104
 
-
 
105
	kfree(ctx);
-
 
106
}
107
}
107
EXPORT_SYMBOL(drm_modeset_lock_all);
108
EXPORT_SYMBOL(drm_modeset_lock_all);
108
 
109
 
109
/**
110
/**
110
 * drm_modeset_unlock_all - drop all modeset locks
111
 * drm_modeset_unlock_all - drop all modeset locks
111
 * @dev: device
112
 * @dev: DRM device
112
 *
113
 *
113
 * This function drop all modeset locks taken by drm_modeset_lock_all.
114
 * This function drops all modeset locks taken by a previous call to the
-
 
115
 * drm_modeset_lock_all() function.
-
 
116
 *
-
 
117
 * This function is deprecated. It uses the lock acquisition context stored
-
 
118
 * in the DRM device's ->mode_config. This facilitates conversion of existing
-
 
119
 * code because it removes the need to manually deal with the acquisition
-
 
120
 * context, but it is also brittle because the context is global and care must
-
 
121
 * be taken not to nest calls. New code should pass the acquisition context
-
 
122
 * directly to the drm_modeset_drop_locks() function.
114
 */
123
 */
115
void drm_modeset_unlock_all(struct drm_device *dev)
124
void drm_modeset_unlock_all(struct drm_device *dev)
116
{
125
{
117
	struct drm_mode_config *config = &dev->mode_config;
126
	struct drm_mode_config *config = &dev->mode_config;
118
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
127
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
119
 
128
 
120
	if (WARN_ON(!ctx))
129
	if (WARN_ON(!ctx))
121
		return;
130
		return;
122
 
131
 
123
	config->acquire_ctx = NULL;
132
	config->acquire_ctx = NULL;
124
	drm_modeset_drop_locks(ctx);
133
	drm_modeset_drop_locks(ctx);
125
	drm_modeset_acquire_fini(ctx);
134
	drm_modeset_acquire_fini(ctx);
126
 
135
 
127
	kfree(ctx);
136
	kfree(ctx);
128
 
137
 
129
	mutex_unlock(&dev->mode_config.mutex);
138
	mutex_unlock(&dev->mode_config.mutex);
130
}
139
}
131
EXPORT_SYMBOL(drm_modeset_unlock_all);
140
EXPORT_SYMBOL(drm_modeset_unlock_all);
132
 
141
 
133
/**
142
/**
134
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
143
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
135
 * @crtc: DRM CRTC
144
 * @crtc: DRM CRTC
136
 * @plane: DRM plane to be updated on @crtc
145
 * @plane: DRM plane to be updated on @crtc
137
 *
146
 *
138
 * This function locks the given crtc and plane (which should be either the
147
 * This function locks the given crtc and plane (which should be either the
139
 * primary or cursor plane) using a hidden acquire context. This is necessary so
148
 * primary or cursor plane) using a hidden acquire context. This is necessary so
140
 * that drivers internally using the atomic interfaces can grab further locks
149
 * that drivers internally using the atomic interfaces can grab further locks
141
 * with the lock acquire context.
150
 * with the lock acquire context.
142
 *
151
 *
143
 * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
152
 * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
144
 * converted to universal planes yet.
153
 * converted to universal planes yet.
145
 */
154
 */
146
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
155
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
147
			   struct drm_plane *plane)
156
			   struct drm_plane *plane)
148
{
157
{
149
	struct drm_modeset_acquire_ctx *ctx;
158
	struct drm_modeset_acquire_ctx *ctx;
150
	int ret;
159
	int ret;
151
 
160
 
152
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
161
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
153
	if (WARN_ON(!ctx))
162
	if (WARN_ON(!ctx))
154
		return;
163
		return;
155
 
164
 
156
	drm_modeset_acquire_init(ctx, 0);
165
	drm_modeset_acquire_init(ctx, 0);
157
 
166
 
158
retry:
167
retry:
159
	ret = drm_modeset_lock(&crtc->mutex, ctx);
168
	ret = drm_modeset_lock(&crtc->mutex, ctx);
160
	if (ret)
169
	if (ret)
161
		goto fail;
170
		goto fail;
162
 
171
 
163
	if (plane) {
172
	if (plane) {
164
		ret = drm_modeset_lock(&plane->mutex, ctx);
173
		ret = drm_modeset_lock(&plane->mutex, ctx);
165
		if (ret)
174
		if (ret)
166
			goto fail;
175
			goto fail;
167
 
176
 
168
		if (plane->crtc) {
177
		if (plane->crtc) {
169
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
178
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
170
			if (ret)
179
			if (ret)
171
				goto fail;
180
				goto fail;
172
		}
181
		}
173
	}
182
	}
174
 
183
 
175
	WARN_ON(crtc->acquire_ctx);
184
	WARN_ON(crtc->acquire_ctx);
176
 
185
 
177
	/* now we hold the locks, so now that it is safe, stash the
186
	/* now we hold the locks, so now that it is safe, stash the
178
	 * ctx for drm_modeset_unlock_crtc():
187
	 * ctx for drm_modeset_unlock_crtc():
179
	 */
188
	 */
180
	crtc->acquire_ctx = ctx;
189
	crtc->acquire_ctx = ctx;
181
 
190
 
182
	return;
191
	return;
183
 
192
 
184
fail:
193
fail:
185
	if (ret == -EDEADLK) {
194
	if (ret == -EDEADLK) {
186
		drm_modeset_backoff(ctx);
195
		drm_modeset_backoff(ctx);
187
		goto retry;
196
		goto retry;
188
	}
197
	}
189
}
198
}
190
EXPORT_SYMBOL(drm_modeset_lock_crtc);
199
EXPORT_SYMBOL(drm_modeset_lock_crtc);
191
 
200
 
192
/**
201
/**
193
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
202
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
194
 * @crtc: drm crtc
203
 * @crtc: drm crtc
195
 *
204
 *
196
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
205
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
197
 * locking, and store the acquire ctx in the corresponding crtc. All other
206
 * locking, and store the acquire ctx in the corresponding crtc. All other
198
 * legacy operations take all locks and use a global acquire context. This
207
 * legacy operations take all locks and use a global acquire context. This
199
 * function grabs the right one.
208
 * function grabs the right one.
200
 */
209
 */
201
struct drm_modeset_acquire_ctx *
210
struct drm_modeset_acquire_ctx *
202
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
211
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
203
{
212
{
204
	if (crtc->acquire_ctx)
213
	if (crtc->acquire_ctx)
205
		return crtc->acquire_ctx;
214
		return crtc->acquire_ctx;
206
 
215
 
207
	WARN_ON(!crtc->dev->mode_config.acquire_ctx);
216
	WARN_ON(!crtc->dev->mode_config.acquire_ctx);
208
 
217
 
209
	return crtc->dev->mode_config.acquire_ctx;
218
	return crtc->dev->mode_config.acquire_ctx;
210
}
219
}
211
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
220
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
212
 
221
 
213
/**
222
/**
214
 * drm_modeset_unlock_crtc - drop crtc lock
223
 * drm_modeset_unlock_crtc - drop crtc lock
215
 * @crtc: drm crtc
224
 * @crtc: drm crtc
216
 *
225
 *
217
 * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
226
 * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
218
 * locks acquired through the hidden context.
227
 * locks acquired through the hidden context.
219
 */
228
 */
220
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
229
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
221
{
230
{
222
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
231
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
223
 
232
 
224
	if (WARN_ON(!ctx))
233
	if (WARN_ON(!ctx))
225
		return;
234
		return;
226
 
235
 
227
	crtc->acquire_ctx = NULL;
236
	crtc->acquire_ctx = NULL;
228
	drm_modeset_drop_locks(ctx);
237
	drm_modeset_drop_locks(ctx);
229
	drm_modeset_acquire_fini(ctx);
238
	drm_modeset_acquire_fini(ctx);
230
 
239
 
231
	kfree(ctx);
240
	kfree(ctx);
232
}
241
}
233
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
242
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
234
 
243
 
235
/**
244
/**
236
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
245
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
237
 * @dev: device
246
 * @dev: device
238
 *
247
 *
239
 * Useful as a debug assert.
248
 * Useful as a debug assert.
240
 */
249
 */
241
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
250
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
242
{
251
{
243
	struct drm_crtc *crtc;
252
	struct drm_crtc *crtc;
244
 
253
 
245
	/* Locking is currently fubar in the panic handler. */
254
	/* Locking is currently fubar in the panic handler. */
246
	if (oops_in_progress)
255
	if (oops_in_progress)
247
		return;
256
		return;
248
 
257
 
249
	drm_for_each_crtc(crtc, dev)
258
	drm_for_each_crtc(crtc, dev)
250
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
259
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
251
 
260
 
252
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
261
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
253
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
262
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
254
}
263
}
255
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
264
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
256
 
265
 
257
/**
266
/**
258
 * drm_modeset_acquire_init - initialize acquire context
267
 * drm_modeset_acquire_init - initialize acquire context
259
 * @ctx: the acquire context
268
 * @ctx: the acquire context
260
 * @flags: for future
269
 * @flags: for future
261
 */
270
 */
262
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
271
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
263
		uint32_t flags)
272
		uint32_t flags)
264
{
273
{
265
	memset(ctx, 0, sizeof(*ctx));
274
	memset(ctx, 0, sizeof(*ctx));
266
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
275
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
267
	INIT_LIST_HEAD(&ctx->locked);
276
	INIT_LIST_HEAD(&ctx->locked);
268
}
277
}
269
EXPORT_SYMBOL(drm_modeset_acquire_init);
278
EXPORT_SYMBOL(drm_modeset_acquire_init);
270
 
279
 
271
/**
280
/**
272
 * drm_modeset_acquire_fini - cleanup acquire context
281
 * drm_modeset_acquire_fini - cleanup acquire context
273
 * @ctx: the acquire context
282
 * @ctx: the acquire context
274
 */
283
 */
275
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
284
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
276
{
285
{
277
	ww_acquire_fini(&ctx->ww_ctx);
286
	ww_acquire_fini(&ctx->ww_ctx);
278
}
287
}
279
EXPORT_SYMBOL(drm_modeset_acquire_fini);
288
EXPORT_SYMBOL(drm_modeset_acquire_fini);
280
 
289
 
281
/**
290
/**
282
 * drm_modeset_drop_locks - drop all locks
291
 * drm_modeset_drop_locks - drop all locks
283
 * @ctx: the acquire context
292
 * @ctx: the acquire context
284
 *
293
 *
285
 * Drop all locks currently held against this acquire context.
294
 * Drop all locks currently held against this acquire context.
286
 */
295
 */
287
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
296
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
288
{
297
{
289
	WARN_ON(ctx->contended);
298
	WARN_ON(ctx->contended);
290
	while (!list_empty(&ctx->locked)) {
299
	while (!list_empty(&ctx->locked)) {
291
		struct drm_modeset_lock *lock;
300
		struct drm_modeset_lock *lock;
292
 
301
 
293
		lock = list_first_entry(&ctx->locked,
302
		lock = list_first_entry(&ctx->locked,
294
				struct drm_modeset_lock, head);
303
				struct drm_modeset_lock, head);
295
 
304
 
296
		drm_modeset_unlock(lock);
305
		drm_modeset_unlock(lock);
297
	}
306
	}
298
}
307
}
299
EXPORT_SYMBOL(drm_modeset_drop_locks);
308
EXPORT_SYMBOL(drm_modeset_drop_locks);
300
 
309
 
301
/*
 * Acquire @lock under @ctx. @interruptible selects the interruptible ww_mutex
 * variants, @slow selects the slow-path acquisition of a previously contended
 * lock after a backoff. When ctx->trylock_only is set only a trylock is
 * attempted and -EBUSY is returned on contention.
 */
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	/* caller must have resolved a previous -EDEADLK via backoff first */
	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		/* remember the contended lock so backoff can re-acquire it */
		ctx->contended = lock;
	}

	return ret;
}
342
 
351
 
343
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
352
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
344
		bool interruptible)
353
		bool interruptible)
345
{
354
{
346
	struct drm_modeset_lock *contended = ctx->contended;
355
	struct drm_modeset_lock *contended = ctx->contended;
347
 
356
 
348
	ctx->contended = NULL;
357
	ctx->contended = NULL;
349
 
358
 
350
	if (WARN_ON(!contended))
359
	if (WARN_ON(!contended))
351
		return 0;
360
		return 0;
352
 
361
 
353
	drm_modeset_drop_locks(ctx);
362
	drm_modeset_drop_locks(ctx);
354
 
363
 
355
	return modeset_lock(contended, ctx, interruptible, true);
364
	return modeset_lock(contended, ctx, interruptible, true);
356
}
365
}
357
 
366
 
358
/**
367
/**
359
 * drm_modeset_backoff - deadlock avoidance backoff
368
 * drm_modeset_backoff - deadlock avoidance backoff
360
 * @ctx: the acquire context
369
 * @ctx: the acquire context
361
 *
370
 *
362
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
371
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
363
 * you must call this function to drop all currently held locks and
372
 * you must call this function to drop all currently held locks and
364
 * block until the contended lock becomes available.
373
 * block until the contended lock becomes available.
365
 */
374
 */
366
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
375
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
367
{
376
{
368
	modeset_backoff(ctx, false);
377
	modeset_backoff(ctx, false);
369
}
378
}
370
EXPORT_SYMBOL(drm_modeset_backoff);
379
EXPORT_SYMBOL(drm_modeset_backoff);
371
 
380
 
372
/**
381
/**
373
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
382
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
374
 * @ctx: the acquire context
383
 * @ctx: the acquire context
375
 *
384
 *
376
 * Interruptible version of drm_modeset_backoff()
385
 * Interruptible version of drm_modeset_backoff()
377
 */
386
 */
378
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
387
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
379
{
388
{
380
	return modeset_backoff(ctx, true);
389
	return modeset_backoff(ctx, true);
381
}
390
}
382
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
391
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
383
 
392
 
384
/**
393
/**
385
 * drm_modeset_lock - take modeset lock
394
 * drm_modeset_lock - take modeset lock
386
 * @lock: lock to take
395
 * @lock: lock to take
387
 * @ctx: acquire ctx
396
 * @ctx: acquire ctx
388
 *
397
 *
389
 * If ctx is not NULL, then its ww acquire context is used and the
398
 * If ctx is not NULL, then its ww acquire context is used and the
390
 * lock will be tracked by the context and can be released by calling
399
 * lock will be tracked by the context and can be released by calling
391
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
400
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
392
 * deadlock scenario has been detected and it is an error to attempt
401
 * deadlock scenario has been detected and it is an error to attempt
393
 * to take any more locks without first calling drm_modeset_backoff().
402
 * to take any more locks without first calling drm_modeset_backoff().
394
 */
403
 */
395
int drm_modeset_lock(struct drm_modeset_lock *lock,
404
int drm_modeset_lock(struct drm_modeset_lock *lock,
396
		struct drm_modeset_acquire_ctx *ctx)
405
		struct drm_modeset_acquire_ctx *ctx)
397
{
406
{
398
	if (ctx)
407
	if (ctx)
399
		return modeset_lock(lock, ctx, false, false);
408
		return modeset_lock(lock, ctx, false, false);
400
 
409
 
401
	ww_mutex_lock(&lock->mutex, NULL);
410
	ww_mutex_lock(&lock->mutex, NULL);
402
	return 0;
411
	return 0;
403
}
412
}
404
EXPORT_SYMBOL(drm_modeset_lock);
413
EXPORT_SYMBOL(drm_modeset_lock);
405
 
414
 
406
/**
415
/**
407
 * drm_modeset_lock_interruptible - take modeset lock
416
 * drm_modeset_lock_interruptible - take modeset lock
408
 * @lock: lock to take
417
 * @lock: lock to take
409
 * @ctx: acquire ctx
418
 * @ctx: acquire ctx
410
 *
419
 *
411
 * Interruptible version of drm_modeset_lock()
420
 * Interruptible version of drm_modeset_lock()
412
 */
421
 */
413
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
422
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
414
		struct drm_modeset_acquire_ctx *ctx)
423
		struct drm_modeset_acquire_ctx *ctx)
415
{
424
{
416
	if (ctx)
425
	if (ctx)
417
		return modeset_lock(lock, ctx, true, false);
426
		return modeset_lock(lock, ctx, true, false);
418
 
427
 
419
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
428
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
420
}
429
}
421
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
430
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
422
 
431
 
423
/**
432
/**
424
 * drm_modeset_unlock - drop modeset lock
433
 * drm_modeset_unlock - drop modeset lock
425
 * @lock: lock to release
434
 * @lock: lock to release
426
 */
435
 */
427
void drm_modeset_unlock(struct drm_modeset_lock *lock)
436
void drm_modeset_unlock(struct drm_modeset_lock *lock)
428
{
437
{
429
	list_del_init(&lock->head);
438
	list_del_init(&lock->head);
430
	ww_mutex_unlock(&lock->mutex);
439
	ww_mutex_unlock(&lock->mutex);
431
}
440
}
432
EXPORT_SYMBOL(drm_modeset_unlock);
441
EXPORT_SYMBOL(drm_modeset_unlock);
-
 
442
 
-
 
443
/**
-
 
444
 * drm_modeset_lock_all_ctx - take all modeset locks
-
 
445
 * @dev: DRM device
-
 
446
 * @ctx: lock acquisition context
433
 
447
 *
434
/* In some legacy codepaths it's convenient to just grab all the crtc and plane
448
 * This function takes all modeset locks, suitable where a more fine-grained
-
 
449
 * scheme isn't (yet) implemented.
-
 
450
 *
-
 
451
 * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
-
 
452
 * since that lock isn't required for modeset state changes. Callers which
-
 
453
 * need to grab that lock too need to do so outside of the acquire context
-
 
454
 * @ctx.
-
 
455
 *
-
 
456
 * Locks acquired with this function should be released by calling the
-
 
457
 * drm_modeset_drop_locks() function on @ctx.
-
 
458
 *
-
 
459
 * Returns: 0 on success or a negative error-code on failure.
435
 * related locks. */
460
 */
436
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
461
int drm_modeset_lock_all_ctx(struct drm_device *dev,
437
		struct drm_modeset_acquire_ctx *ctx)
462
		struct drm_modeset_acquire_ctx *ctx)
438
{
463
{
439
	struct drm_crtc *crtc;
464
	struct drm_crtc *crtc;
440
	struct drm_plane *plane;
465
	struct drm_plane *plane;
441
	int ret = 0;
466
	int ret;
-
 
467
 
-
 
468
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
-
 
469
	if (ret)
-
 
470
		return ret;
442
 
471
 
443
	drm_for_each_crtc(crtc, dev) {
472
	drm_for_each_crtc(crtc, dev) {
444
		ret = drm_modeset_lock(&crtc->mutex, ctx);
473
		ret = drm_modeset_lock(&crtc->mutex, ctx);
445
		if (ret)
474
		if (ret)
446
			return ret;
475
			return ret;
447
	}
476
	}
448
 
477
 
449
	drm_for_each_plane(plane, dev) {
478
	drm_for_each_plane(plane, dev) {
450
		ret = drm_modeset_lock(&plane->mutex, ctx);
479
		ret = drm_modeset_lock(&plane->mutex, ctx);
451
		if (ret)
480
		if (ret)
452
			return ret;
481
			return ret;
453
	}
482
	}
454
 
483
 
455
	return 0;
484
	return 0;
456
}
485
}
457
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
486
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);