/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and the atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use ww_mutex and acquire-contexts to avoid deadlocks.  But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx.  This is provided
 * by drm_modeset_lock / drm_modeset_acquire_ctx.
 *
 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
 *
 * The basic usage pattern is to:
 *
 *     drm_modeset_acquire_init(&ctx)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, &ctx)
 *         if (ret == -EDEADLK) {
 *             drm_modeset_backoff(&ctx);
 *             goto retry;
 *         }
 *     }
 *     ... do stuff ...
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */
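
/*
 * A concrete sketch of the pattern above, with the acquire context on the
 * stack and two real locks in place of the abstract set; the crtc and dev
 * pointers and the do_work() helper are only illustrative:
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *     ret = drm_modeset_lock(&crtc->mutex, &ctx);
 *     if (ret == -EDEADLK) {
 *         drm_modeset_backoff(&ctx);
 *         goto retry;
 *     }
 *     ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
 *     if (ret == -EDEADLK) {
 *         drm_modeset_backoff(&ctx);
 *         goto retry;
 *     }
 *
 *     do_work();
 *
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */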

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: DRM device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped by calling the
 * drm_modeset_unlock_all() function.
 *
 * This function is deprecated. It allocates a lock acquisition context and
 * stores it in the DRM device's ->mode_config. This facilitates conversion of
 * existing code because it removes the need to manually deal with the
 * acquisition context, but it is also brittle because the context is global
 * and care must be taken not to nest calls. New code should use the
 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (WARN_ON(!ctx))
		return;

	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, ctx);
	if (ret < 0) {
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);
			goto retry;
		}

		drm_modeset_acquire_fini(ctx);
		kfree(ctx);
		return;
	}

	WARN_ON(config->acquire_ctx);

	/*
	 * We hold the locks now, so it is safe to stash the acquisition
	 * context for drm_modeset_unlock_all().
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: DRM device
 *
 * This function drops all modeset locks taken by a previous call to the
 * drm_modeset_lock_all() function.
 *
 * This function is deprecated. It uses the lock acquisition context stored
 * in the DRM device's ->mode_config. This facilitates conversion of existing
 * code because it removes the need to manually deal with the acquisition
 * context, but it is also brittle because the context is global and care must
 * be taken not to nest calls. New code should pass the acquisition context
 * directly to the drm_modeset_drop_locks() function.
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
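
/*
 * A minimal usage sketch for the two helpers above; update_legacy_state()
 * stands in for whatever legacy code needs every modeset lock held:
 *
 *     drm_modeset_lock_all(dev);
 *     update_legacy_state(dev);
 *     drm_modeset_unlock_all(dev);
 *
 * The calls must not nest, since the acquire context is stashed globally
 * in dev->mode_config.acquire_ctx.
 */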

/**
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
 * @crtc: DRM CRTC
 * @plane: DRM plane to be updated on @crtc
 *
 * This function locks the given crtc and plane (which should be either the
 * primary or cursor plane) using a hidden acquire context. This is necessary so
 * that drivers internally using the atomic interfaces can grab further locks
 * with the lock acquire context.
 *
 * Note that @plane can be NULL, e.g. when the cursor support hasn't been
 * converted to universal planes yet.
 */
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
			   struct drm_plane *plane)
{
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (WARN_ON(!ctx))
		return;

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;

	if (plane) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			goto fail;

		if (plane->crtc) {
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
			if (ret)
				goto fail;
		}
	}

	WARN_ON(crtc->acquire_ctx);

	/* we hold the locks now, so it is safe to stash the
	 * ctx for drm_modeset_unlock_crtc():
	 */
	crtc->acquire_ctx = ctx;

	return;

fail:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}
}
EXPORT_SYMBOL(drm_modeset_lock_crtc);

/**
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
 * @crtc: drm crtc
 *
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
 * locking, and store the acquire ctx in the corresponding crtc. All other
 * legacy operations take all locks and use a global acquire context. This
 * function grabs the right one.
 */
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
{
	if (crtc->acquire_ctx)
		return crtc->acquire_ctx;

	WARN_ON(!crtc->dev->mode_config.acquire_ctx);

	return crtc->dev->mode_config.acquire_ctx;
}
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);

/**
 * drm_modeset_unlock_crtc - drop crtc lock
 * @crtc: drm crtc
 *
 * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
 * locks acquired through the hidden context.
 */
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	crtc->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
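
/*
 * Sketch of a legacy per-crtc path built from the three helpers above,
 * where plane is the crtc's primary or cursor plane (or NULL) and
 * my_update() is a hypothetical driver hook that may take further locks
 * through the stashed context:
 *
 *     drm_modeset_lock_crtc(crtc, plane);
 *     ret = my_update(crtc, plane, drm_modeset_legacy_acquire_ctx(crtc));
 *     drm_modeset_unlock_crtc(crtc);
 */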

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
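
/*
 * Typical use of the check above is at the top of a helper that expects
 * the caller to hold every modeset lock (sketch only, my_restore_all()
 * is hypothetical):
 *
 *     static void my_restore_all(struct drm_device *dev)
 *     {
 *         drm_warn_on_modeset_not_all_locked(dev);
 *         ... touch state that requires all locks ...
 *     }
 */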

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: for future use
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	WARN_ON(ctx->contended);
	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
				struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}

static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
		bool interruptible)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, interruptible, true);
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 */
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	modeset_backoff(ctx, false);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * Interruptible version of drm_modeset_backoff()
 */
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
{
	return modeset_backoff(ctx, true);
}
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, false, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);

/**
 * drm_modeset_lock_interruptible - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * Interruptible version of drm_modeset_lock()
 */
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, true, false);

	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
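
/*
 * The interruptible variants can also return -EINTR, so a sketch of the
 * retry loop from the DOC section needs one extra error path when it is
 * built on them (my_locks[], n, i and ret are placeholders):
 *
 *     drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *     for (i = 0; i < n; i++) {
 *         ret = drm_modeset_lock_interruptible(my_locks[i], &ctx);
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff_interruptible(&ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (ret)
 *             goto out;
 *     }
 *     ... do stuff ...
 * out:
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */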

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);

/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
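
/*
 * Sketch of the explicit-context usage that the deprecated
 * drm_modeset_lock_all() is meant to be replaced with; it mirrors the
 * body of that helper, but keeps the context on the caller's stack:
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *     ret = drm_modeset_lock_all_ctx(dev, &ctx);
 *     if (ret == -EDEADLK) {
 *         drm_modeset_backoff(&ctx);
 *         goto retry;
 *     }
 *     ... do stuff ...
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */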