Subversion Repositories Kolibri OS

Rev 4569

/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie
 *      Rob Clark
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	spinlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	size_t dma_buf_size;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

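/*
 * ttm_object_file_ref / ttm_object_file_unref - reference helpers for a
 * struct ttm_object_file. The file object is freed by ttm_object_file_destroy
 * once the last reference is dropped.
 */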
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}

static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}

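/**
 * ttm_base_object_init - Initialize a struct ttm_base_object.
 *
 * @tfile: The struct ttm_object_file of the caller.
 * @base: The struct ttm_base_object to initialize.
 * @shareable: Whether the object may be shared with other callers.
 * @object_type: The object type.
 * @refcount_release: Called when the last reference to the base object is gone.
 * @ref_obj_release: Called when a non-usage reference of a given type is dropped.
 *
 * The object is given a handle in the device object hash, and an initial
 * TTM_REF_USAGE reference on it is held by @tfile.
 */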
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	spin_lock(&tdev->object_lock);
	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
					    &base->hash,
					    (unsigned long)base, 31, 0, 0);
	spin_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);

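/*
 * ttm_release_base - kref release function for a struct ttm_base_object.
 *
 * Removes the object from the device object hash and invokes the object's
 * refcount_release function, if any. Called when the last reference is
 * dropped in ttm_base_object_unref().
 */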
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
	    container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}

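/**
 * ttm_base_object_unref - Drop a reference to a struct ttm_base_object.
 *
 * @p_base: Pointer to a pointer to the object. Set to NULL on return.
 */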
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}
EXPORT_SYMBOL(ttm_base_object_unref);

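/**
 * ttm_base_object_lookup - Look up a base object referenced by @tfile.
 *
 * @tfile: The struct ttm_object_file whose TTM_REF_USAGE hash is searched.
 * @key: Handle (hash key) of the object.
 *
 * Returns a referenced pointer to the base object, or NULL if the handle is
 * unknown to @tfile or the object is about to be destroyed.
 */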
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);

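/**
 * ttm_base_object_lookup_for_ref - Look up a base object by its global name.
 *
 * @tdev: The struct ttm_object_device whose object hash is searched.
 * @key: Global name (hash key) of the object.
 *
 * Like ttm_base_object_lookup(), but searches the device-wide object hash
 * rather than a file's reference hash.
 */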
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tdev->object_hash;
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);

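/**
 * ttm_ref_object_add - Add a reference of type @ref_type from @tfile to @base.
 *
 * @tfile: The struct ttm_object_file holding the reference.
 * @base: The base object to reference.
 * @ref_type: Type of the reference.
 * @existed: Optionally returns whether a reference of this type already
 * existed, in which case only its ref count is bumped.
 *
 * On the first reference of a given type, a struct ttm_ref_object is
 * allocated, hashed on the object key, added to the file's ref_list, and a
 * reference is taken on the base object.
 */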
int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		ret = drm_ht_insert_item_rcu(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			spin_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		spin_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);

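/*
 * ttm_ref_object_release - kref release function for a struct ttm_ref_object.
 *
 * Called with tfile->lock held. The lock is temporarily dropped while
 * calling back into the driver and freeing the ref object, and reacquired
 * before returning.
 */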
static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

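/**
 * ttm_ref_object_base_unref - Drop a reference held by @tfile on an object.
 *
 * @tfile: The struct ttm_object_file holding the reference.
 * @key: Key (handle) of the referenced object.
 * @ref_type: Type of the reference to drop.
 *
 * Returns -EINVAL if no such reference exists, 0 otherwise.
 */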
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

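/**
 * ttm_object_file_release - Release a struct ttm_object_file on file close.
 *
 * @p_tfile: Pointer to a pointer to the object file. Set to NULL on return.
 *
 * Releases all references still held by the file, tears down the per-file
 * reference hash tables and drops the caller's reference on the file.
 */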
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	spin_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

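/**
 * ttm_object_file_init - Allocate and initialize a struct ttm_object_file.
 *
 * @tdev: The struct ttm_object_device this file is associated with.
 * @hash_order: Order of the per-file reference hash tables.
 *
 * Returns a refcounted object file on success, NULL on failure.
 */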
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);

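/**
 * ttm_object_device_init - Allocate and initialize a struct ttm_object_device.
 *
 * @mem_glob: The struct ttm_mem_global used to account ref objects and
 * exported dma-bufs.
 * @hash_order: Order of the device object hash table.
 * @ops: dma_buf_ops used for dma-bufs exported from this device. The release
 * method is wrapped by ttm_prime_dmabuf_release().
 *
 * Returns the object device on success, NULL on failure.
 */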
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
		ttm_round_pot(sizeof(struct file));
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);

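/**
 * ttm_object_device_release - Tear down a struct ttm_object_device.
 *
 * @p_tdev: Pointer to a pointer to the object device. Set to NULL on return.
 */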
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	spin_lock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	spin_unlock(&tdev->object_lock);

	kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dma_buf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops)
		return -ENOSYS;

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->hash.key;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);

	dma_buf_put(dma_buf);

	return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf, with memory accounting.
		 */
		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
					   false, true);
		if (unlikely(ret != 0)) {
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			ttm_mem_global_free(tdev->mem_glob,
					    tdev->dma_buf_size);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime, bool shareable,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **),
			  void (*ref_obj_release) (struct ttm_base_object *,
						   enum ttm_ref_type ref_type))
{
	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release,
				    ref_obj_release);
}
EXPORT_SYMBOL(ttm_prime_object_init);