Subversion Repositories Kolibri OS

Rev

Rev 5271 | Rev 6937 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
5060 serge 1
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>
 
28
/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine grained locking, and atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use ww_mutex and acquire-contexts to avoid deadlocks.  But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx.  This is provided
 * by drm_modeset_lock / drm_modeset_acquire_ctx.
 *
 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
 *
 * The basic usage pattern is to:
 *
 *     drm_modeset_acquire_init(&ctx)
 *   retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *       ret = drm_modeset_lock(lock, &ctx)
 *       if (ret == -EDEADLK) {
 *          drm_modeset_backoff(&ctx);
 *          goto retry;
 *       }
 *     }
 *
 *     ... do stuff ...
 *
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */
57
 
58
/**
6084 serge 59
 * drm_modeset_lock_all - take all modeset locks
60
 * @dev: drm device
5271 serge 61
 *
6084 serge 62
 * This function takes all modeset locks, suitable where a more fine-grained
63
 * scheme isn't (yet) implemented. Locks must be dropped with
64
 * drm_modeset_unlock_all.
5271 serge 65
 */
6084 serge 66
void drm_modeset_lock_all(struct drm_device *dev)
5271 serge 67
{
68
	struct drm_mode_config *config = &dev->mode_config;
69
	struct drm_modeset_acquire_ctx *ctx;
70
	int ret;
71
 
6084 serge 72
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
73
	if (WARN_ON(!ctx))
74
		return;
5271 serge 75
 
6084 serge 76
	mutex_lock(&config->mutex);
5271 serge 77
 
78
	drm_modeset_acquire_init(ctx, 0);
79
 
80
retry:
81
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
82
	if (ret)
83
		goto fail;
84
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
85
	if (ret)
86
		goto fail;
87
 
88
	WARN_ON(config->acquire_ctx);
89
 
90
	/* now we hold the locks, so now that it is safe, stash the
91
	 * ctx for drm_modeset_unlock_all():
92
	 */
93
	config->acquire_ctx = ctx;
94
 
95
	drm_warn_on_modeset_not_all_locked(dev);
96
 
6084 serge 97
	return;
5271 serge 98
 
99
fail:
100
	if (ret == -EDEADLK) {
101
		drm_modeset_backoff(ctx);
102
		goto retry;
103
	}
104
 
6084 serge 105
	kfree(ctx);
5271 serge 106
}
107
EXPORT_SYMBOL(drm_modeset_lock_all);
108
 
109
/**
110
 * drm_modeset_unlock_all - drop all modeset locks
111
 * @dev: device
112
 *
113
 * This function drop all modeset locks taken by drm_modeset_lock_all.
114
 */
115
void drm_modeset_unlock_all(struct drm_device *dev)
116
{
117
	struct drm_mode_config *config = &dev->mode_config;
118
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
119
 
120
	if (WARN_ON(!ctx))
121
		return;
122
 
123
	config->acquire_ctx = NULL;
124
	drm_modeset_drop_locks(ctx);
125
	drm_modeset_acquire_fini(ctx);
126
 
127
	kfree(ctx);
128
 
129
	mutex_unlock(&dev->mode_config.mutex);
130
}
131
EXPORT_SYMBOL(drm_modeset_unlock_all);
132
 
133
/**
134
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
135
 * @crtc: DRM CRTC
136
 * @plane: DRM plane to be updated on @crtc
137
 *
138
 * This function locks the given crtc and plane (which should be either the
139
 * primary or cursor plane) using a hidden acquire context. This is necessary so
140
 * that drivers internally using the atomic interfaces can grab further locks
141
 * with the lock acquire context.
142
 *
143
 * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
144
 * converted to universal planes yet.
145
 */
146
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
147
			   struct drm_plane *plane)
148
{
149
	struct drm_modeset_acquire_ctx *ctx;
150
	int ret;
151
 
152
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
153
	if (WARN_ON(!ctx))
154
		return;
155
 
156
	drm_modeset_acquire_init(ctx, 0);
157
 
158
retry:
159
	ret = drm_modeset_lock(&crtc->mutex, ctx);
160
	if (ret)
161
		goto fail;
162
 
163
	if (plane) {
164
		ret = drm_modeset_lock(&plane->mutex, ctx);
165
		if (ret)
166
			goto fail;
167
 
168
		if (plane->crtc) {
169
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
170
			if (ret)
171
				goto fail;
172
		}
173
	}
174
 
175
	WARN_ON(crtc->acquire_ctx);
176
 
177
	/* now we hold the locks, so now that it is safe, stash the
178
	 * ctx for drm_modeset_unlock_crtc():
179
	 */
180
	crtc->acquire_ctx = ctx;
181
 
182
	return;
183
 
184
fail:
185
	if (ret == -EDEADLK) {
186
		drm_modeset_backoff(ctx);
187
		goto retry;
188
	}
189
}
190
EXPORT_SYMBOL(drm_modeset_lock_crtc);
191
 
192
/**
193
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
194
 * @crtc: drm crtc
195
 *
196
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
197
 * locking, and store the acquire ctx in the corresponding crtc. All other
198
 * legacy operations take all locks and use a global acquire context. This
199
 * function grabs the right one.
200
 */
201
struct drm_modeset_acquire_ctx *
202
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
203
{
204
	if (crtc->acquire_ctx)
205
		return crtc->acquire_ctx;
206
 
207
	WARN_ON(!crtc->dev->mode_config.acquire_ctx);
208
 
209
	return crtc->dev->mode_config.acquire_ctx;
210
}
211
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
212
 
213
/**
214
 * drm_modeset_unlock_crtc - drop crtc lock
215
 * @crtc: drm crtc
216
 *
217
 * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
218
 * locks acquired through the hidden context.
219
 */
220
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
221
{
222
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
223
 
224
	if (WARN_ON(!ctx))
225
		return;
226
 
227
	crtc->acquire_ctx = NULL;
228
	drm_modeset_drop_locks(ctx);
229
	drm_modeset_acquire_fini(ctx);
230
 
231
	kfree(ctx);
232
}
233
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
234
 
235
/**
236
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
237
 * @dev: device
238
 *
239
 * Useful as a debug assert.
240
 */
241
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
242
{
243
	struct drm_crtc *crtc;
244
 
245
	/* Locking is currently fubar in the panic handler. */
6084 serge 246
	if (oops_in_progress)
247
		return;
5271 serge 248
 
6084 serge 249
	drm_for_each_crtc(crtc, dev)
5271 serge 250
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
251
 
252
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
253
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
254
}
255
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
256
 
257
/**
5060 serge 258
 * drm_modeset_acquire_init - initialize acquire context
259
 * @ctx: the acquire context
260
 * @flags: for future
261
 */
262
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
263
		uint32_t flags)
264
{
265
	memset(ctx, 0, sizeof(*ctx));
266
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
267
	INIT_LIST_HEAD(&ctx->locked);
268
}
269
EXPORT_SYMBOL(drm_modeset_acquire_init);
270
 
271
/**
272
 * drm_modeset_acquire_fini - cleanup acquire context
273
 * @ctx: the acquire context
274
 */
275
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
276
{
277
	ww_acquire_fini(&ctx->ww_ctx);
278
}
279
EXPORT_SYMBOL(drm_modeset_acquire_fini);
280
 
281
/**
282
 * drm_modeset_drop_locks - drop all locks
283
 * @ctx: the acquire context
284
 *
285
 * Drop all locks currently held against this acquire context.
286
 */
287
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
288
{
289
	WARN_ON(ctx->contended);
290
	while (!list_empty(&ctx->locked)) {
291
		struct drm_modeset_lock *lock;
292
 
293
		lock = list_first_entry(&ctx->locked,
294
				struct drm_modeset_lock, head);
295
 
296
		drm_modeset_unlock(lock);
297
	}
298
}
299
EXPORT_SYMBOL(drm_modeset_drop_locks);
300
 
301
/* Low-level lock helper shared by the public lock/backoff variants.
 *
 * @interruptible: use the _interruptible ww_mutex variants
 * @slow: use the _slow variants (after a backoff, to block on the
 *        contended lock and guarantee forward progress)
 *
 * Returns 0 on success (also when the lock was already held via this ctx),
 * -EBUSY in trylock-only mode when contended, -EINTR when interrupted, or
 * -EDEADLK when the caller must back off — in which case the contended
 * lock is stashed in ctx->contended for modeset_backoff().
 */
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}
342
 
343
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
344
		bool interruptible)
345
{
346
	struct drm_modeset_lock *contended = ctx->contended;
347
 
348
	ctx->contended = NULL;
349
 
350
	if (WARN_ON(!contended))
351
		return 0;
352
 
353
	drm_modeset_drop_locks(ctx);
354
 
355
	return modeset_lock(contended, ctx, interruptible, true);
356
}
357
 
358
/**
359
 * drm_modeset_backoff - deadlock avoidance backoff
360
 * @ctx: the acquire context
361
 *
362
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
363
 * you must call this function to drop all currently held locks and
364
 * block until the contended lock becomes available.
365
 */
366
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
367
{
368
	modeset_backoff(ctx, false);
369
}
370
EXPORT_SYMBOL(drm_modeset_backoff);
371
 
372
/**
373
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
374
 * @ctx: the acquire context
375
 *
376
 * Interruptible version of drm_modeset_backoff()
377
 */
378
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
379
{
380
	return modeset_backoff(ctx, true);
381
}
382
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
383
 
384
/**
385
 * drm_modeset_lock - take modeset lock
386
 * @lock: lock to take
387
 * @ctx: acquire ctx
388
 *
389
 * If ctx is not NULL, then its ww acquire context is used and the
390
 * lock will be tracked by the context and can be released by calling
391
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
392
 * deadlock scenario has been detected and it is an error to attempt
393
 * to take any more locks without first calling drm_modeset_backoff().
394
 */
395
int drm_modeset_lock(struct drm_modeset_lock *lock,
396
		struct drm_modeset_acquire_ctx *ctx)
397
{
398
	if (ctx)
399
		return modeset_lock(lock, ctx, false, false);
400
 
401
	ww_mutex_lock(&lock->mutex, NULL);
402
	return 0;
403
}
404
EXPORT_SYMBOL(drm_modeset_lock);
405
 
406
/**
407
 * drm_modeset_lock_interruptible - take modeset lock
408
 * @lock: lock to take
409
 * @ctx: acquire ctx
410
 *
411
 * Interruptible version of drm_modeset_lock()
412
 */
413
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
414
		struct drm_modeset_acquire_ctx *ctx)
415
{
416
	if (ctx)
417
		return modeset_lock(lock, ctx, true, false);
418
 
419
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
420
}
421
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
422
 
423
/**
424
 * drm_modeset_unlock - drop modeset lock
425
 * @lock: lock to release
426
 */
427
void drm_modeset_unlock(struct drm_modeset_lock *lock)
428
{
429
	list_del_init(&lock->head);
430
	ww_mutex_unlock(&lock->mutex);
431
}
432
EXPORT_SYMBOL(drm_modeset_unlock);
433
 
5271 serge 434
/* In some legacy codepaths it's convenient to just grab all the crtc and plane
435
 * related locks. */
5060 serge 436
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
437
		struct drm_modeset_acquire_ctx *ctx)
438
{
439
	struct drm_crtc *crtc;
5271 serge 440
	struct drm_plane *plane;
5060 serge 441
	int ret = 0;
442
 
6084 serge 443
	drm_for_each_crtc(crtc, dev) {
5060 serge 444
		ret = drm_modeset_lock(&crtc->mutex, ctx);
445
		if (ret)
446
			return ret;
447
	}
448
 
6084 serge 449
	drm_for_each_plane(plane, dev) {
5271 serge 450
		ret = drm_modeset_lock(&plane->mutex, ctx);
451
		if (ret)
452
			return ret;
453
	}
454
 
5060 serge 455
	return 0;
456
}
457
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);