Subversion Repositories Kolibri OS

Rev 4539 → Rev 4560: drm_gem.c

This revision ports the KolibriOS copy of the GEM core to the upstream drm_vma_offset_manager rework: the per-device struct drm_gem_mm behind dev->mm_private becomes a struct drm_vma_offset_manager hung off dev->vma_offset_manager; drm_gem_object_alloc() and drm_gem_object_ref_bug() are removed; the drm_gem_dumb_destroy() helper is added; drm_gem_create_mmap_offset_size() is rewritten on top of drm_vma_offset_add(); and the drm_gem_get_pages()/drm_gem_put_pages() helpers are brought in. A few drm_vma_node_allow()/drm_vma_node_revoke() call sites are carried as commented-out stubs in this port. In the hunks below, "-" lines are Rev 4539 and "+" lines are Rev 4560.
Line 26... Line 26...
  */
 
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
Line 84... Line 86...
  */
 
 int
 drm_gem_init(struct drm_device *dev)
 {
-	struct drm_gem_mm *mm;
+	struct drm_vma_offset_manager *vma_offset_manager;
 
 	mutex_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 
-	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
-	if (!mm) {
+	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+	if (!vma_offset_manager) {
 		DRM_ERROR("out of memory\n");
 		return -ENOMEM;
 	}
 
-	dev->mm_private = mm;
-	drm_vma_offset_manager_init(&mm->vma_manager,
+	dev->vma_offset_manager = vma_offset_manager;
+	drm_vma_offset_manager_init(vma_offset_manager,
 				    DRM_FILE_PAGE_OFFSET_START,
 		    DRM_FILE_PAGE_OFFSET_SIZE);
 
 	return 0;
 }
 
 void
 drm_gem_destroy(struct drm_device *dev)
 {
-	struct drm_gem_mm *mm = dev->mm_private;
 
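The hunk stops at the removed local variable, so the rest of the new drm_gem_destroy() body is not shown here. A minimal sketch of the teardown this change implies, assuming the drm_vma_offset_manager_destroy() helper from drm_vma_manager.h (an assumption; it appears nowhere in these hunks):

void
drm_gem_destroy(struct drm_device *dev)
{
	/* Undo drm_gem_init(): drain the offset manager and free the
	 * structure that was kzalloc'ed there. */
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}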
Line 122... Line 123...
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size)
 {
 	struct file *filp;
 
+	drm_gem_private_object_init(dev, obj, size);
+
 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
 	if (IS_ERR(filp))
 		return PTR_ERR(filp);
 
-	drm_gem_private_object_init(dev, obj, size);
 	obj->filp = filp;
 

(Note: the init call moves ahead of shmem_file_setup(), so the object is fully set up before the function's only failure point and the PTR_ERR() early return needs no unwinding.)
Line 154... Line 156...
 	drm_vma_node_reset(&obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
-	struct drm_gem_object *obj;
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
-		goto free;
-
-	if (drm_gem_object_init(dev, obj, size) != 0)
-		goto free;
-
-	if (dev->driver->gem_init_object != NULL &&
-	    dev->driver->gem_init_object(obj) != 0) {
-		goto fput;
-	}
-	return obj;
-fput:
-	/* Object_init mangles the global counters - readjust them. */
-	free(obj->filp);
-free:
-	kfree(obj);
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-	BUG();
-}
-
 /**
  * Called after the last handle to the object has been closed
  *
  * Removes any name for the object. Note that this must be
  * called before drm_gem_object_free or we'll be touching
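With drm_gem_object_alloc() and the dev->driver->gem_init_object() hook gone, each driver now allocates and initializes its objects itself. A minimal sketch of such an open-coded create path; example_gem_create() and its caller are hypothetical, and drm_gem_object_unreference_unlocked() is assumed from the GEM core:

static int example_gem_create(struct drm_device *dev, struct drm_file *file,
			      size_t size, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
	if (ret) {
		kfree(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);
	/* Drop the local reference; on success the new handle keeps its own. */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}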
Line 202... Line 170...
 
 	/* Remove any name for this object */
 	if (obj->name) {
 		idr_remove(&dev->object_name_idr, obj->name);
 		obj->name = 0;
-		/*
-		 * The object name held a reference to this object, drop
-		 * that now.
-		*
-		* This cannot be the last reference, since the handle holds one too.
-		 */
-		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	}
 }
Line 266... Line 227...
 
 	/* Release reference and decrement refcount. */
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
+//	drm_vma_node_revoke(&obj->vma_node, filp->filp);
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, filp);
 	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
+/**
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
 /**
  * Create a handle for this object. This adds a handle reference
  * to the object, which includes a regular reference count. Callers
  * will likely want to dereference the object afterwards.
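The new helper lets a GEM-based driver satisfy the dumb-buffer interface without writing its own destroy callback. A sketch of the wiring; apart from drm_gem_dumb_destroy itself, the callback values are hypothetical:

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM,
	.dumb_create     = example_dumb_create,     /* hypothetical */
	.dumb_map_offset = example_dumb_map_offset, /* hypothetical */
	.dumb_destroy    = drm_gem_dumb_destroy,    /* helper added in this revision */
};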
Line 315... Line 287...
 		drm_gem_object_handle_unreference_unlocked(obj);
 		return ret;
 	}
 	*handlep = ret;
 
+
+//	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+//	if (ret) {
+//		drm_gem_handle_delete(file_priv, *handlep);
+//		return ret;
+//	}
 
 	if (dev->driver->gem_open_object) {
 		ret = dev->driver->gem_open_object(obj, file_priv);
 		if (ret) {
 			drm_gem_handle_delete(file_priv, *handlep);
Line 342... Line 320...
 
 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
 }
 EXPORT_SYMBOL(drm_gem_handle_create);
 
-
+#if 0
 /**
  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
  * @obj: obj in question
  *
  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
  */
-#if 0
 void
 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
 
-	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
+	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
 }
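As context for the fake-offset machinery that drm_gem_free_mmap_offset() unwinds: a driver allocates an offset for the object and reports it to userspace, which passes it to mmap(2). A sketch, assuming drm_gem_object_lookup() and the drm_vma_node_offset_addr() accessor from drm_vma_manager.h are available; example_dumb_map_offset() is hypothetical:

static int example_dumb_map_offset(struct drm_file *file,
				   struct drm_device *dev,
				   uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret == 0)
		/* The byte offset userspace hands to mmap(2). */
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}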
Line 378... Line 354...
  */
 int
 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
 
-	/* Set the object up for mmap'ing */
-	list = &obj->map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (!list->map)
-		return -ENOMEM;
-
-	map = list->map;
-	map->type = _DRM_GEM;
-	map->size = obj->size;
-	map->handle = obj;
-
-	/* Get a DRM GEM mmap offset allocated... */
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-			obj->size / PAGE_SIZE, 0, false);
-
-	if (!list->file_offset_node) {
-		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-		ret = -ENOSPC;
-		goto out_free_list;
-	}
-
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-			obj->size / PAGE_SIZE, 0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
-		goto out_free_list;
-	}
-
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		DRM_ERROR("failed to add to map hash\n");
-		goto out_free_mm;
-	}
-
-	return 0;
-
-out_free_mm:
-	drm_mm_put_block(list->file_offset_node);
-out_free_list:
-	kfree(list->map);
-	list->map = NULL;
-
-	return ret;
+	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
+				  size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(obj->filp);
+	mapping = inode->i_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* There is a hypothetical issue w/ drivers that require
+		 * buffer memory in the low 4GB.. if the pages are un-
+		 * pinned, and swapped out, they can end up swapped back
+		 * in above 4GB.  If pages are already in memory, then
+		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
+		 * even if the already in-memory page disobeys the mask.
+		 *
+		 * It is only a theoretical issue today, because none of
+		 * the devices with this limitation can be populated with
+		 * enough memory to trigger the issue.  But this BUG_ON()
+		 * is here as a reminder in case the problem with
+		 * shmem_read_mapping_page_gfp() isn't solved by the time
+		 * it does become a real issue.
+		 *
+		 * See this thread: http://lkml.org/lkml/2011/7/11/238
+		 */
+		BUG_ON((gfpmask & __GFP_DMA32) &&
+				(page_to_pfn(p) >= 0x00100000UL));
+	}
+
+	return pages;
+
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
+
+	drm_free_large(pages);
+	return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed)
+{
+	int i, npages;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
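A sketch of how the two new page helpers pair up in a driver's pin/unpin path; struct example_bo and both functions are hypothetical, and GFP_KERNEL is only an illustrative base mask (drm_gem_get_pages() ORs in the mapping's own gfp mask):

struct example_bo {
	struct drm_gem_object base;
	struct page **pages;		/* valid only while pinned */
};

static int example_bo_pin_pages(struct example_bo *bo)
{
	struct page **pages = drm_gem_get_pages(&bo->base, GFP_KERNEL);

	if (IS_ERR(pages))
		return PTR_ERR(pages);
	bo->pages = pages;
	return 0;
}

static void example_bo_unpin_pages(struct example_bo *bo)
{
	/* dirty + accessed, so shmem writes the contents back before reclaim */
	drm_gem_put_pages(&bo->base, bo->pages, true, true);
	bo->pages = NULL;
}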
Line 502... Line 555...
 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
 		if (ret < 0)
 			goto err;
 
 		obj->name = ret;
-
-		/* Allocate a reference for the name table.  */
-		drm_gem_object_reference(obj);
 	}
 
Line 579... Line 629...
 {
 	struct drm_file *file_priv = data;
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
-	drm_gem_remove_prime_handles(obj, file_priv);
 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 