Subversion Repositories Kolibri OS

Rev

Rev 3480 | Rev 4104 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
27
 
28
#include 
29
#include 
30
#include 
31
#include 
32
#include 
33
#include 
34
#include 
35
 
36
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
63
 
64
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

/* The fake offset range starts just above the 32-bit (or, on 32-bit
 * longs, 28-bit) page-offset space so it cannot collide with ordinary
 * mappings, and spans 16x that range for object offsets. */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
80
 
81
#if 0
82
/**
83
 * Initialize the GEM device fields
84
 */
85
 
86
int
87
drm_gem_init(struct drm_device *dev)
88
{
89
	struct drm_gem_mm *mm;
90
 
91
	spin_lock_init(&dev->object_name_lock);
92
	idr_init(&dev->object_name_idr);
93
 
94
	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
95
	if (!mm) {
96
		DRM_ERROR("out of memory\n");
97
		return -ENOMEM;
98
	}
99
 
100
	dev->mm_private = mm;
101
 
102
	if (drm_ht_create(&mm->offset_hash, 12)) {
103
		kfree(mm);
104
		return -ENOMEM;
105
	}
106
 
4075 Serge 107
	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
108
		    DRM_FILE_PAGE_OFFSET_SIZE);
3260 Serge 109
 
110
	return 0;
111
}
112
 
113
void
114
drm_gem_destroy(struct drm_device *dev)
115
{
116
	struct drm_gem_mm *mm = dev->mm_private;
117
 
118
	drm_mm_takedown(&mm->offset_manager);
119
	drm_ht_remove(&mm->offset_hash);
120
	kfree(mm);
121
	dev->mm_private = NULL;
122
}
123
#endif
124
 
125
/**
126
 * Initialize an already allocated GEM object of the specified size with
127
 * shmfs backing store.
128
 */
129
int drm_gem_object_init(struct drm_device *dev,
130
			struct drm_gem_object *obj, size_t size)
131
{
132
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
133
 
134
	obj->dev = dev;
135
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
136
	if (IS_ERR(obj->filp))
137
		return PTR_ERR(obj->filp);
138
 
139
	kref_init(&obj->refcount);
140
	atomic_set(&obj->handle_count, 0);
141
	obj->size = size;
142
 
143
	return 0;
144
}
145
EXPORT_SYMBOL(drm_gem_object_init);
146
 
147
/**
148
 * Initialize an already allocated GEM object of the specified size with
149
 * no GEM provided backing store. Instead the caller is responsible for
150
 * backing the object and handling it.
151
 */
152
int drm_gem_private_object_init(struct drm_device *dev,
153
			struct drm_gem_object *obj, size_t size)
154
{
155
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
156
 
157
	obj->dev = dev;
158
	obj->filp = NULL;
159
 
160
	kref_init(&obj->refcount);
161
	atomic_set(&obj->handle_count, 0);
162
	obj->size = size;
163
 
164
	return 0;
165
}
166
EXPORT_SYMBOL(drm_gem_private_object_init);
167
 
168
/**
169
 * Allocate a GEM object of the specified size with shmfs backing store
170
 */
171
struct drm_gem_object *
172
drm_gem_object_alloc(struct drm_device *dev, size_t size)
173
{
174
	struct drm_gem_object *obj;
175
 
176
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
177
	if (!obj)
178
		goto free;
179
 
180
	if (drm_gem_object_init(dev, obj, size) != 0)
181
		goto free;
182
 
183
	if (dev->driver->gem_init_object != NULL &&
184
	    dev->driver->gem_init_object(obj) != 0) {
185
		goto fput;
186
	}
187
	return obj;
188
fput:
189
	/* Object_init mangles the global counters - readjust them. */
190
	free(obj->filp);
191
free:
192
	kfree(obj);
193
	return NULL;
194
}
195
EXPORT_SYMBOL(drm_gem_object_alloc);
196
 
197
 
198
/**
 * Removes the mapping from handle to filp for this object.
 *
 * Looks the object up under filp's table_lock, removes the idr entry,
 * then drops the handle reference (which may free the object).
 * Returns 0 on success, -EINVAL if the handle is unknown.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	/* NOTE(review): -2 appears to be a debug sentinel handle value in
	 * this port — confirm against callers before removing. */
    if(handle == -2)
        printf("%s handle %d\n", __FUNCTION__, handle);

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

 //   printf("%s handle %d obj %p\n", __FUNCTION__, handle, obj);

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

//   drm_gem_remove_prime_handles(obj, filp);

	/* Give the driver a chance to clean up its per-handle state. */
	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
244
 
245
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 *
 * On success, stores the new handle in *handlep and returns 0;
 * on failure, returns the negative error from idr_alloc() or the
 * driver's gem_open_object() hook.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	/* Handles start at 1; 0 is reserved as "no handle". */
	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Roll back the idr entry and handle reference. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
286
 
287
 
288
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	/* Hash the offset so mmap() can find the object back quickly. */
	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
#endif
376
 
377
/** Returns a reference to the object named by the handle.
 *
 * Takes a new regular reference on the object; the caller must drop it
 * with drm_gem_object_unreference*().  Returns NULL for unknown handles.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	/* NOTE(review): -2 appears to be a debug sentinel handle value in
	 * this port — confirm against callers before removing. */
     if(handle == -2)
        printf("%s handle %d\n", __FUNCTION__, handle);

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	/* Take the reference while still holding table_lock so the handle
	 * cannot be deleted out from under us. */
	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
403
 
404
/**
405
 * Releases the handle to an mm object.
406
 */
407
int
408
drm_gem_close_ioctl(struct drm_device *dev, void *data,
409
		    struct drm_file *file_priv)
410
{
411
	struct drm_gem_close *args = data;
412
	int ret;
413
 
414
	ret = drm_gem_handle_delete(file_priv, args->handle);
415
 
416
	return ret;
417
}
418
 
419
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */

#if 0
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		/* Names start at 1; 0 means "unnamed". */
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	}

	/* Return the (possibly pre-existing) name to userspace. */
	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	spin_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	/* Debug trace for the -2 sentinel handle.  The original code tested
	 * 'handle' before it was ever assigned (an uninitialized read, which
	 * is undefined behavior); check it here, after handle creation. */
	if (handle == -2)
		printf("%s handle %d\n", __FUNCTION__, handle);

	args->handle = handle;
	/* NOTE(review): obj->size is read after the local reference was
	 * dropped; the handle reference keeps the object alive here, but
	 * verify this ordering if handle deletion can race. */
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 *
 * idr_for_each() callback: drops the handle reference held by @data's
 * file on the object @ptr.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
#endif
550
 
551
/* Release the shmfs backing file of a GEM object, if any.
 * NOTE(review): free() here is presumably this port's replacement for
 * fput()/shmem file release — confirm it matches shmem_file_setup(). */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
	    free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
558
 
559
/**
560
 * Called after the last reference to the object has been lost.
561
 * Must be called holding struct_ mutex
562
 *
563
 * Frees the object
564
 */
565
void
566
drm_gem_object_free(struct kref *kref)
567
{
568
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
569
	struct drm_device *dev = obj->dev;
570
 
571
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
572
 
573
	if (dev->driver->gem_free_object != NULL)
574
		dev->driver->gem_free_object(obj);
575
}
576
EXPORT_SYMBOL(drm_gem_object_free);
577
 
578
/* kref release callback that must never run: used where the caller knows
 * at least one other reference still exists (see
 * drm_gem_object_handle_free). */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
582
 
583
/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		*
		* This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);
612
 
613
#if 0
614
void drm_gem_vm_open(struct vm_area_struct *vma)
615
{
616
	struct drm_gem_object *obj = vma->vm_private_data;
617
 
618
	drm_gem_object_reference(obj);
619
 
620
	mutex_lock(&obj->dev->struct_mutex);
621
	drm_vm_open_locked(obj->dev, vma);
622
	mutex_unlock(&obj->dev->struct_mutex);
623
}
624
EXPORT_SYMBOL(drm_gem_vm_open);
625
 
626
void drm_gem_vm_close(struct vm_area_struct *vma)
627
{
628
	struct drm_gem_object *obj = vma->vm_private_data;
629
	struct drm_device *dev = obj->dev;
630
 
631
	mutex_lock(&dev->struct_mutex);
632
	drm_vm_close_locked(obj->dev, vma);
633
	drm_gem_object_unreference(obj);
634
	mutex_unlock(&dev->struct_mutex);
635
}
636
EXPORT_SYMBOL(drm_gem_vm_close);
637
 
638
#endif
639