Subversion Repositories Kolibri OS

Rev

Rev 3290 | Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
3260 Serge 1
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
27
 
28
/* NOTE(review): the original include targets were stripped by the SVN web
 * viewer (angle brackets were parsed as HTML). Reconstructed from the
 * upstream Linux drm_gem.c this port is based on — TODO confirm against
 * the KolibriOS build tree. */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drmP.h>
35
 
36
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
63
 
64
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */
72
 
73
/*
 * Fake mmap offset range for GEM objects, expressed in pages.  The start
 * sits just above the 4GiB (64-bit) / 256MiB (32-bit) byte boundary so GEM
 * offsets never collide with legacy DRM map offsets below it.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
80
 
81
#if 0
82
/**
83
 * Initialize the GEM device fields
84
 */
85
 
86
int
87
drm_gem_init(struct drm_device *dev)
88
{
89
	struct drm_gem_mm *mm;
90
 
91
	spin_lock_init(&dev->object_name_lock);
92
	idr_init(&dev->object_name_idr);
93
 
94
	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
95
	if (!mm) {
96
		DRM_ERROR("out of memory\n");
97
		return -ENOMEM;
98
	}
99
 
100
	dev->mm_private = mm;
101
 
102
	if (drm_ht_create(&mm->offset_hash, 12)) {
103
		kfree(mm);
104
		return -ENOMEM;
105
	}
106
 
107
	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
108
			DRM_FILE_PAGE_OFFSET_SIZE)) {
109
		drm_ht_remove(&mm->offset_hash);
110
		kfree(mm);
111
		return -ENOMEM;
112
	}
113
 
114
	return 0;
115
}
116
 
117
void
118
drm_gem_destroy(struct drm_device *dev)
119
{
120
	struct drm_gem_mm *mm = dev->mm_private;
121
 
122
	drm_mm_takedown(&mm->offset_manager);
123
	drm_ht_remove(&mm->offset_hash);
124
	kfree(mm);
125
	dev->mm_private = NULL;
126
}
127
#endif
128
 
129
/**
130
 * Initialize an already allocated GEM object of the specified size with
131
 * shmfs backing store.
132
 */
133
int drm_gem_object_init(struct drm_device *dev,
134
			struct drm_gem_object *obj, size_t size)
135
{
136
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
137
 
138
	obj->dev = dev;
139
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
140
	if (IS_ERR(obj->filp))
141
		return PTR_ERR(obj->filp);
142
 
143
	kref_init(&obj->refcount);
144
	atomic_set(&obj->handle_count, 0);
145
	obj->size = size;
146
 
147
	return 0;
148
}
149
EXPORT_SYMBOL(drm_gem_object_init);
150
 
151
/**
152
 * Initialize an already allocated GEM object of the specified size with
153
 * no GEM provided backing store. Instead the caller is responsible for
154
 * backing the object and handling it.
155
 */
156
int drm_gem_private_object_init(struct drm_device *dev,
157
			struct drm_gem_object *obj, size_t size)
158
{
159
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
160
 
161
	obj->dev = dev;
162
	obj->filp = NULL;
163
 
164
	kref_init(&obj->refcount);
165
	atomic_set(&obj->handle_count, 0);
166
	obj->size = size;
167
 
168
	return 0;
169
}
170
EXPORT_SYMBOL(drm_gem_private_object_init);
171
 
172
/**
173
 * Allocate a GEM object of the specified size with shmfs backing store
174
 */
175
struct drm_gem_object *
176
drm_gem_object_alloc(struct drm_device *dev, size_t size)
177
{
178
	struct drm_gem_object *obj;
179
 
180
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
181
	if (!obj)
182
		goto free;
183
 
184
	if (drm_gem_object_init(dev, obj, size) != 0)
185
		goto free;
186
 
187
	if (dev->driver->gem_init_object != NULL &&
188
	    dev->driver->gem_init_object(obj) != 0) {
189
		goto fput;
190
	}
191
	return obj;
192
fput:
193
	/* Object_init mangles the global counters - readjust them. */
194
	free(obj->filp);
195
free:
196
	kfree(obj);
197
	return NULL;
198
}
199
EXPORT_SYMBOL(drm_gem_object_alloc);
200
 
201
 
202
/**
203
 * Removes the mapping from handle to filp for this object.
204
 */
205
int
206
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
207
{
208
	struct drm_device *dev;
209
	struct drm_gem_object *obj;
210
 
211
	/* This is gross. The idr system doesn't let us try a delete and
212
	 * return an error code.  It just spews if you fail at deleting.
213
	 * So, we have to grab a lock around finding the object and then
214
	 * doing the delete on it and dropping the refcount, or the user
215
	 * could race us to double-decrement the refcount and cause a
216
	 * use-after-free later.  Given the frequency of our handle lookups,
217
	 * we may want to use ida for number allocation and a hash table
218
	 * for the pointers, anyway.
219
	 */
220
	spin_lock(&filp->table_lock);
221
 
222
	/* Check if we currently have a reference on the object */
223
	obj = idr_find(&filp->object_idr, handle);
224
	if (obj == NULL) {
225
		spin_unlock(&filp->table_lock);
226
		return -EINVAL;
227
	}
228
	dev = obj->dev;
229
 
230
	/* Release reference and decrement refcount. */
231
	idr_remove(&filp->object_idr, handle);
232
	spin_unlock(&filp->table_lock);
233
 
234
//   drm_gem_remove_prime_handles(obj, filp);
235
 
236
	if (dev->driver->gem_close_object)
237
		dev->driver->gem_close_object(obj, filp);
238
	drm_gem_object_handle_unreference_unlocked(obj);
239
 
240
	return 0;
241
}
242
EXPORT_SYMBOL(drm_gem_handle_delete);
243
 
244
/**
245
 * Create a handle for this object. This adds a handle reference
246
 * to the object, which includes a regular reference count. Callers
247
 * will likely want to dereference the object afterwards.
248
 */
249
int
250
drm_gem_handle_create(struct drm_file *file_priv,
251
		       struct drm_gem_object *obj,
252
		       u32 *handlep)
253
{
254
	struct drm_device *dev = obj->dev;
255
	int ret;
256
 
257
	/*
258
	 * Get the user-visible handle using idr.
259
	 */
260
again:
261
	/* ensure there is space available to allocate a handle */
262
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
263
		return -ENOMEM;
264
 
265
	/* do the allocation under our spinlock */
266
	spin_lock(&file_priv->table_lock);
267
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
268
	spin_unlock(&file_priv->table_lock);
269
	if (ret == -EAGAIN)
270
		goto again;
271
	else if (ret)
272
		return ret;
273
 
274
	drm_gem_object_handle_reference(obj);
275
 
276
	if (dev->driver->gem_open_object) {
277
		ret = dev->driver->gem_open_object(obj, file_priv);
278
		if (ret) {
279
			drm_gem_handle_delete(file_priv, *handlep);
280
			return ret;
281
		}
282
	}
283
 
284
	return 0;
285
}
286
EXPORT_SYMBOL(drm_gem_handle_create);
287
 
288
 
289
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
309
 
310
/**
311
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
312
 * @obj: obj in question
313
 *
314
 * GEM memory mapping works by handing back to userspace a fake mmap offset
315
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
316
 * up the object based on the offset and sets up the various memory mapping
317
 * structures.
318
 *
319
 * This routine allocates and attaches a fake offset for @obj.
320
 */
321
int
322
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
323
{
324
	struct drm_device *dev = obj->dev;
325
	struct drm_gem_mm *mm = dev->mm_private;
326
	struct drm_map_list *list;
327
	struct drm_local_map *map;
328
	int ret;
329
 
330
	/* Set the object up for mmap'ing */
331
	list = &obj->map_list;
332
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
333
	if (!list->map)
334
		return -ENOMEM;
335
 
336
	map = list->map;
337
	map->type = _DRM_GEM;
338
	map->size = obj->size;
339
	map->handle = obj;
340
 
341
	/* Get a DRM GEM mmap offset allocated... */
342
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
343
			obj->size / PAGE_SIZE, 0, false);
344
 
345
	if (!list->file_offset_node) {
346
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
347
		ret = -ENOSPC;
348
		goto out_free_list;
349
	}
350
 
351
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
352
			obj->size / PAGE_SIZE, 0);
353
	if (!list->file_offset_node) {
354
		ret = -ENOMEM;
355
		goto out_free_list;
356
	}
357
 
358
	list->hash.key = list->file_offset_node->start;
359
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
360
	if (ret) {
361
		DRM_ERROR("failed to add to map hash\n");
362
		goto out_free_mm;
363
	}
364
 
365
	return 0;
366
 
367
out_free_mm:
368
	drm_mm_put_block(list->file_offset_node);
369
out_free_list:
370
	kfree(list->map);
371
	list->map = NULL;
372
 
373
	return ret;
374
}
375
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
376
#endif
377
 
378
/** Returns a reference to the object named by the handle. */
379
struct drm_gem_object *
380
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
381
		      u32 handle)
382
{
383
	struct drm_gem_object *obj;
384
 
385
	spin_lock(&filp->table_lock);
386
 
387
	/* Check if we currently have a reference on the object */
388
	obj = idr_find(&filp->object_idr, handle);
389
	if (obj == NULL) {
390
		spin_unlock(&filp->table_lock);
391
		return NULL;
392
	}
393
 
394
	drm_gem_object_reference(obj);
395
 
396
	spin_unlock(&filp->table_lock);
397
 
398
	return obj;
399
}
400
EXPORT_SYMBOL(drm_gem_object_lookup);
401
 
402
/**
403
 * Releases the handle to an mm object.
404
 */
405
int
406
drm_gem_close_ioctl(struct drm_device *dev, void *data,
407
		    struct drm_file *file_priv)
408
{
409
	struct drm_gem_close *args = data;
410
	int ret;
411
 
412
	ret = drm_gem_handle_delete(file_priv, args->handle);
413
 
414
	return ret;
415
}
416
 
417
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */

#if 0
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		/* NOTE(review): args->name is set before ret is checked, so a
		 * failed idr allocation still copies the (zero) name out.
		 * Kept as-is — matches the upstream code of this era. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		/* Already named: just report the existing name. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
470
 
471
/**
472
 * Open an object using the global name, returning a handle and the size.
473
 *
474
 * This handle (of course) holds a reference to the object, so the object
475
 * will not go away until the handle is deleted.
476
 */
477
int
478
drm_gem_open_ioctl(struct drm_device *dev, void *data,
479
		   struct drm_file *file_priv)
480
{
481
	struct drm_gem_open *args = data;
482
	struct drm_gem_object *obj;
483
	int ret;
484
	u32 handle;
485
 
486
	if (!(dev->driver->driver_features & DRIVER_GEM))
487
		return -ENODEV;
488
 
489
	spin_lock(&dev->object_name_lock);
490
	obj = idr_find(&dev->object_name_idr, (int) args->name);
491
	if (obj)
492
		drm_gem_object_reference(obj);
493
	spin_unlock(&dev->object_name_lock);
494
	if (!obj)
495
		return -ENOENT;
496
 
497
	ret = drm_gem_handle_create(file_priv, obj, &handle);
498
	drm_gem_object_unreference_unlocked(obj);
499
	if (ret)
500
		return ret;
501
 
502
	args->handle = handle;
503
	args->size = obj->size;
504
 
505
	return 0;
506
}
507
 
508
/**
509
 * Called at device open time, sets up the structure for handling refcounting
510
 * of mm objects.
511
 */
512
void
513
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
514
{
515
	idr_init(&file_private->object_idr);
516
	spin_lock_init(&file_private->table_lock);
517
}
518
 
519
/**
520
 * Called at device close to release the file's
521
 * handle references on objects.
522
 */
523
static int
524
drm_gem_object_release_handle(int id, void *ptr, void *data)
525
{
526
	struct drm_file *file_priv = data;
527
	struct drm_gem_object *obj = ptr;
528
	struct drm_device *dev = obj->dev;
529
 
530
	drm_gem_remove_prime_handles(obj, file_priv);
531
 
532
	if (dev->driver->gem_close_object)
533
		dev->driver->gem_close_object(obj, file_priv);
534
 
535
	drm_gem_object_handle_unreference_unlocked(obj);
536
 
537
	return 0;
538
}
539
 
540
/**
541
 * Called at close time when the filp is going away.
542
 *
543
 * Releases any remaining references on objects by this filp.
544
 */
545
void
546
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
547
{
548
	idr_for_each(&file_private->object_idr,
549
		     &drm_gem_object_release_handle, file_private);
550
 
551
	idr_remove_all(&file_private->object_idr);
552
	idr_destroy(&file_private->object_idr);
553
}
554
#endif
555
 
556
void
557
drm_gem_object_release(struct drm_gem_object *obj)
558
{
559
	if (obj->filp)
560
	    free(obj->filp);
561
}
562
EXPORT_SYMBOL(drm_gem_object_release);
563
 
564
/**
565
 * Called after the last reference to the object has been lost.
566
 * Must be called holding struct_ mutex
567
 *
568
 * Frees the object
569
 */
570
void
571
drm_gem_object_free(struct kref *kref)
572
{
573
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
574
	struct drm_device *dev = obj->dev;
575
 
576
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
577
 
578
	if (dev->driver->gem_free_object != NULL)
579
		dev->driver->gem_free_object(obj);
580
}
581
EXPORT_SYMBOL(drm_gem_object_free);
582
 
583
/* kref release stub for a reference drop that must never be the last one;
 * reaching it indicates a refcounting bug, so just BUG(). */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
587
 
588
/**
589
 * Called after the last handle to the object has been closed
590
 *
591
 * Removes any name for the object. Note that this must be
592
 * called before drm_gem_object_free or we'll be touching
593
 * freed memory
594
 */
595
void drm_gem_object_handle_free(struct drm_gem_object *obj)
596
{
597
	struct drm_device *dev = obj->dev;
598
 
599
	/* Remove any name for this object */
600
	spin_lock(&dev->object_name_lock);
601
	if (obj->name) {
602
		idr_remove(&dev->object_name_idr, obj->name);
603
		obj->name = 0;
604
		spin_unlock(&dev->object_name_lock);
605
		/*
606
		 * The object name held a reference to this object, drop
607
		 * that now.
608
		*
609
		* This cannot be the last reference, since the handle holds one too.
610
		 */
611
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
612
	} else
613
		spin_unlock(&dev->object_name_lock);
614
 
615
}
616
EXPORT_SYMBOL(drm_gem_object_handle_free);
617
 
618
#if 0
619
void drm_gem_vm_open(struct vm_area_struct *vma)
620
{
621
	struct drm_gem_object *obj = vma->vm_private_data;
622
 
623
	drm_gem_object_reference(obj);
624
 
625
	mutex_lock(&obj->dev->struct_mutex);
626
	drm_vm_open_locked(obj->dev, vma);
627
	mutex_unlock(&obj->dev->struct_mutex);
628
}
629
EXPORT_SYMBOL(drm_gem_vm_open);
630
 
631
void drm_gem_vm_close(struct vm_area_struct *vma)
632
{
633
	struct drm_gem_object *obj = vma->vm_private_data;
634
	struct drm_device *dev = obj->dev;
635
 
636
	mutex_lock(&dev->struct_mutex);
637
	drm_vm_close_locked(obj->dev, vma);
638
	drm_gem_object_unreference(obj);
639
	mutex_unlock(&dev->struct_mutex);
640
}
641
EXPORT_SYMBOL(drm_gem_vm_close);
642
 
643
#endif
644