Rev 4246 | Rev 4539 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
3260 | Serge | 1 | /* |
2 | * Copyright © 2008 Intel Corporation |
||
3 | * |
||
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
5 | * copy of this software and associated documentation files (the "Software"), |
||
6 | * to deal in the Software without restriction, including without limitation |
||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
||
8 | * and/or sell copies of the Software, and to permit persons to whom the |
||
9 | * Software is furnished to do so, subject to the following conditions: |
||
10 | * |
||
11 | * The above copyright notice and this permission notice (including the next |
||
12 | * paragraph) shall be included in all copies or substantial portions of the |
||
13 | * Software. |
||
14 | * |
||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
||
21 | * IN THE SOFTWARE. |
||
22 | * |
||
23 | * Authors: |
||
24 | * Eric Anholt |
||
25 | * |
||
26 | */ |
||
27 | |||
28 | #include |
||
29 | #include |
||
30 | #include |
||
31 | #include |
||
32 | #include |
||
33 | #include |
||
34 | #include |
||
4104 | Serge | 35 | #include |
3260 | Serge | 36 | |
37 | /** @file drm_gem.c |
||
38 | * |
||
39 | * This file provides some of the base ioctls and library routines for |
||
40 | * the graphics memory manager implemented by each device driver. |
||
41 | * |
||
42 | * Because various devices have different requirements in terms of |
||
43 | * synchronization and migration strategies, implementing that is left up to |
||
44 | * the driver, and all that the general API provides should be generic -- |
||
45 | * allocating objects, reading/writing data with the cpu, freeing objects. |
||
46 | * Even there, platform-dependent optimizations for reading/writing data with |
||
47 | * the CPU mean we'll likely hook those out to driver-specific calls. However, |
||
48 | * the DRI2 implementation wants to have at least allocate/mmap be generic. |
||
49 | * |
||
50 | * The goal was to have swap-backed object allocation managed through |
||
51 | * struct file. However, file descriptors as handles to a struct file have |
||
52 | * two major failings: |
||
53 | * - Process limits prevent more than 1024 or so being used at a time by |
||
54 | * default. |
||
55 | * - Inability to allocate high fds will aggravate the X Server's select() |
||
56 | * handling, and likely that of many GL client applications as well. |
||
57 | * |
||
58 | * This led to a plan of using our own integer IDs (called handles, following |
||
59 | * DRM terminology) to mimic fds, and implement the fd syscalls we need as |
||
60 | * ioctls. The objects themselves will still include the struct file so |
||
61 | * that we can transition to fds if the required kernel infrastructure shows |
||
62 | * up at a later date, and as our interface with shmfs for memory allocation. |
||
63 | */ |
||
64 | |||
65 | /* |
||
66 | * We make up offsets for buffer objects so we can recognize them at |
||
67 | * mmap time. |
||
68 | */ |
||
69 | |||
70 | /* pgoff in mmap is an unsigned long, so we need to make sure that |
||
71 | * the faked up offset will fit |
||
72 | */ |
||
73 | |||
74 | #if BITS_PER_LONG == 64 |
||
75 | #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) |
||
76 | #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) |
||
77 | #else |
||
78 | #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) |
||
79 | #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16) |
||
80 | #endif |
||
81 | |||
82 | /** |
||
83 | * Initialize the GEM device fields |
||
84 | */ |
||
85 | |||
86 | int |
||
87 | drm_gem_init(struct drm_device *dev) |
||
88 | { |
||
89 | struct drm_gem_mm *mm; |
||
90 | |||
4104 | Serge | 91 | mutex_init(&dev->object_name_lock); |
3260 | Serge | 92 | idr_init(&dev->object_name_idr); |
93 | |||
94 | mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); |
||
95 | if (!mm) { |
||
96 | DRM_ERROR("out of memory\n"); |
||
97 | return -ENOMEM; |
||
98 | } |
||
99 | |||
100 | dev->mm_private = mm; |
||
4104 | Serge | 101 | drm_vma_offset_manager_init(&mm->vma_manager, |
102 | DRM_FILE_PAGE_OFFSET_START, |
||
4075 | Serge | 103 | DRM_FILE_PAGE_OFFSET_SIZE); |
3260 | Serge | 104 | |
105 | return 0; |
||
106 | } |
||
107 | |||
108 | void |
||
109 | drm_gem_destroy(struct drm_device *dev) |
||
110 | { |
||
111 | struct drm_gem_mm *mm = dev->mm_private; |
||
112 | |||
4104 | Serge | 113 | drm_vma_offset_manager_destroy(&mm->vma_manager); |
3260 | Serge | 114 | kfree(mm); |
115 | dev->mm_private = NULL; |
||
116 | } |
||
117 | |||
118 | /** |
||
119 | * Initialize an already allocated GEM object of the specified size with |
||
120 | * shmfs backing store. |
||
121 | */ |
||
122 | int drm_gem_object_init(struct drm_device *dev, |
||
123 | struct drm_gem_object *obj, size_t size) |
||
124 | { |
||
4104 | Serge | 125 | struct file *filp; |
3260 | Serge | 126 | |
4104 | Serge | 127 | filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
128 | if (IS_ERR(filp)) |
||
129 | return PTR_ERR(filp); |
||
3260 | Serge | 130 | |
4104 | Serge | 131 | drm_gem_private_object_init(dev, obj, size); |
132 | obj->filp = filp; |
||
3260 | Serge | 133 | |
134 | return 0; |
||
135 | } |
||
136 | EXPORT_SYMBOL(drm_gem_object_init); |
||
137 | |||
138 | /** |
||
139 | * Initialize an already allocated GEM object of the specified size with |
||
140 | * no GEM provided backing store. Instead the caller is responsible for |
||
141 | * backing the object and handling it. |
||
142 | */ |
||
4104 | Serge | 143 | void drm_gem_private_object_init(struct drm_device *dev, |
3260 | Serge | 144 | struct drm_gem_object *obj, size_t size) |
145 | { |
||
146 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
||
147 | |||
148 | obj->dev = dev; |
||
149 | obj->filp = NULL; |
||
150 | |||
151 | kref_init(&obj->refcount); |
||
4104 | Serge | 152 | obj->handle_count = 0; |
3260 | Serge | 153 | obj->size = size; |
4104 | Serge | 154 | drm_vma_node_reset(&obj->vma_node); |
3260 | Serge | 155 | } |
156 | EXPORT_SYMBOL(drm_gem_private_object_init); |
||
157 | |||
158 | /** |
||
159 | * Allocate a GEM object of the specified size with shmfs backing store |
||
160 | */ |
||
161 | struct drm_gem_object * |
||
162 | drm_gem_object_alloc(struct drm_device *dev, size_t size) |
||
163 | { |
||
164 | struct drm_gem_object *obj; |
||
165 | |||
166 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
||
167 | if (!obj) |
||
168 | goto free; |
||
169 | |||
170 | if (drm_gem_object_init(dev, obj, size) != 0) |
||
171 | goto free; |
||
172 | |||
173 | if (dev->driver->gem_init_object != NULL && |
||
174 | dev->driver->gem_init_object(obj) != 0) { |
||
175 | goto fput; |
||
176 | } |
||
177 | return obj; |
||
178 | fput: |
||
179 | /* Object_init mangles the global counters - readjust them. */ |
||
180 | free(obj->filp); |
||
181 | free: |
||
182 | kfree(obj); |
||
183 | return NULL; |
||
184 | } |
||
185 | EXPORT_SYMBOL(drm_gem_object_alloc); |
||
186 | |||
/*
 * kref release callback that must never run: passed to kref_put() on
 * references that are known not to be the last one (e.g. dropping the
 * name-table reference while a handle still holds one).
 */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
||
3260 | Serge | 191 | |
192 | /** |
||
4104 | Serge | 193 | * Called after the last handle to the object has been closed |
194 | * |
||
195 | * Removes any name for the object. Note that this must be |
||
196 | * called before drm_gem_object_free or we'll be touching |
||
197 | * freed memory |
||
198 | */ |
||
199 | static void drm_gem_object_handle_free(struct drm_gem_object *obj) |
||
200 | { |
||
201 | struct drm_device *dev = obj->dev; |
||
202 | |||
203 | /* Remove any name for this object */ |
||
204 | if (obj->name) { |
||
205 | idr_remove(&dev->object_name_idr, obj->name); |
||
206 | obj->name = 0; |
||
207 | /* |
||
208 | * The object name held a reference to this object, drop |
||
209 | * that now. |
||
210 | * |
||
211 | * This cannot be the last reference, since the handle holds one too. |
||
212 | */ |
||
213 | kref_put(&obj->refcount, drm_gem_object_ref_bug); |
||
214 | } |
||
215 | } |
||
216 | |||
217 | |||
218 | static void |
||
219 | drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) |
||
220 | { |
||
221 | if (WARN_ON(obj->handle_count == 0)) |
||
222 | return; |
||
223 | |||
224 | /* |
||
225 | * Must bump handle count first as this may be the last |
||
226 | * ref, in which case the object would disappear before we |
||
227 | * checked for a name |
||
228 | */ |
||
229 | |||
230 | mutex_lock(&obj->dev->object_name_lock); |
||
231 | if (--obj->handle_count == 0) { |
||
232 | drm_gem_object_handle_free(obj); |
||
233 | } |
||
234 | mutex_unlock(&obj->dev->object_name_lock); |
||
235 | |||
236 | drm_gem_object_unreference_unlocked(obj); |
||
237 | } |
||
238 | |||
239 | /** |
||
3260 | Serge | 240 | * Removes the mapping from handle to filp for this object. |
241 | */ |
||
242 | int |
||
243 | drm_gem_handle_delete(struct drm_file *filp, u32 handle) |
||
244 | { |
||
245 | struct drm_device *dev; |
||
246 | struct drm_gem_object *obj; |
||
247 | |||
248 | /* This is gross. The idr system doesn't let us try a delete and |
||
249 | * return an error code. It just spews if you fail at deleting. |
||
250 | * So, we have to grab a lock around finding the object and then |
||
251 | * doing the delete on it and dropping the refcount, or the user |
||
252 | * could race us to double-decrement the refcount and cause a |
||
253 | * use-after-free later. Given the frequency of our handle lookups, |
||
254 | * we may want to use ida for number allocation and a hash table |
||
255 | * for the pointers, anyway. |
||
256 | */ |
||
3480 | Serge | 257 | if(handle == -2) |
258 | printf("%s handle %d\n", __FUNCTION__, handle); |
||
259 | |||
3260 | Serge | 260 | spin_lock(&filp->table_lock); |
261 | |||
262 | /* Check if we currently have a reference on the object */ |
||
263 | obj = idr_find(&filp->object_idr, handle); |
||
264 | if (obj == NULL) { |
||
265 | spin_unlock(&filp->table_lock); |
||
266 | return -EINVAL; |
||
267 | } |
||
268 | dev = obj->dev; |
||
269 | |||
3298 | Serge | 270 | // printf("%s handle %d obj %p\n", __FUNCTION__, handle, obj); |
3290 | Serge | 271 | |
3260 | Serge | 272 | /* Release reference and decrement refcount. */ |
273 | idr_remove(&filp->object_idr, handle); |
||
274 | spin_unlock(&filp->table_lock); |
||
275 | |||
276 | |||
277 | if (dev->driver->gem_close_object) |
||
278 | dev->driver->gem_close_object(obj, filp); |
||
279 | drm_gem_object_handle_unreference_unlocked(obj); |
||
280 | |||
281 | return 0; |
||
282 | } |
||
283 | EXPORT_SYMBOL(drm_gem_handle_delete); |
||
284 | |||
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 *
 * The caller must already hold dev->object_name_lock (enforced by the
 * WARN_ON below); this function DROPS that lock before returning, on
 * both the success and the error path.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	/*
	 * Take the handle reference before checking ret: the error path
	 * below uses the handle-unreference helper, which expects
	 * handle_count to already include this handle.
	 */
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	/* Drop the name lock handed to us by the caller. */
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	/* Let the driver set up any per-file state for this handle. */
	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Undo the handle we just created. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
||
4104 | Serge | 329 | |
330 | /** |
||
331 | * Create a handle for this object. This adds a handle reference |
||
332 | * to the object, which includes a regular reference count. Callers |
||
333 | * will likely want to dereference the object afterwards. |
||
334 | */ |
||
335 | int |
||
336 | drm_gem_handle_create(struct drm_file *file_priv, |
||
337 | struct drm_gem_object *obj, |
||
338 | u32 *handlep) |
||
339 | { |
||
340 | mutex_lock(&obj->dev->object_name_lock); |
||
341 | |||
342 | return drm_gem_handle_create_tail(file_priv, obj, handlep); |
||
343 | } |
||
3260 | Serge | 344 | EXPORT_SYMBOL(drm_gem_handle_create); |
345 | |||
346 | |||
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * NOTE(review): this whole section is compiled out with #if 0 in this
 * port; kept for reference only.
 */
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 *
 * NOTE(review): this disabled body is stale. It uses `list`, `map` and
 * `ret` without ever declaring them, and drives the old drm_mm
 * offset-manager/hash API even though drm_gem_init()/drm_gem_destroy()
 * above use drm_vma_offset_manager. It will not compile if re-enabled
 * without a rewrite. Also note the EXPORT_SYMBOL below names
 * drm_gem_create_mmap_offset, not this function.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
#endif
||
431 | |||
432 | /** Returns a reference to the object named by the handle. */ |
||
433 | struct drm_gem_object * |
||
434 | drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, |
||
435 | u32 handle) |
||
436 | { |
||
437 | struct drm_gem_object *obj; |
||
438 | |||
3480 | Serge | 439 | if(handle == -2) |
440 | printf("%s handle %d\n", __FUNCTION__, handle); |
||
441 | |||
3260 | Serge | 442 | spin_lock(&filp->table_lock); |
443 | |||
444 | /* Check if we currently have a reference on the object */ |
||
445 | obj = idr_find(&filp->object_idr, handle); |
||
446 | if (obj == NULL) { |
||
447 | spin_unlock(&filp->table_lock); |
||
448 | return NULL; |
||
449 | } |
||
450 | |||
451 | drm_gem_object_reference(obj); |
||
452 | |||
453 | spin_unlock(&filp->table_lock); |
||
454 | |||
455 | return obj; |
||
456 | } |
||
457 | EXPORT_SYMBOL(drm_gem_object_lookup); |
||
458 | |||
459 | /** |
||
460 | * Releases the handle to an mm object. |
||
461 | */ |
||
462 | int |
||
463 | drm_gem_close_ioctl(struct drm_device *dev, void *data, |
||
464 | struct drm_file *file_priv) |
||
465 | { |
||
466 | struct drm_gem_close *args = data; |
||
467 | int ret; |
||
468 | |||
469 | ret = drm_gem_handle_delete(file_priv, args->handle); |
||
470 | |||
471 | return ret; |
||
472 | } |
||
473 | |||
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 *
 * Flink is idempotent per object: if the object already has a name,
 * that existing name is returned.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Lookup takes a reference; dropped on every exit path below. */
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* Preload under the name lock so idr_alloc can use GFP_NOWAIT. */
	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
||
523 | |||
524 | /** |
||
525 | * Open an object using the global name, returning a handle and the size. |
||
526 | * |
||
527 | * This handle (of course) holds a reference to the object, so the object |
||
528 | * will not go away until the handle is deleted. |
||
529 | */ |
||
530 | int |
||
531 | drm_gem_open_ioctl(struct drm_device *dev, void *data, |
||
532 | struct drm_file *file_priv) |
||
533 | { |
||
534 | struct drm_gem_open *args = data; |
||
535 | struct drm_gem_object *obj; |
||
536 | int ret; |
||
537 | u32 handle; |
||
538 | |||
539 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
||
540 | return -ENODEV; |
||
541 | |||
3480 | Serge | 542 | if(handle == -2) |
543 | printf("%s handle %d\n", __FUNCTION__, handle); |
||
544 | |||
4104 | Serge | 545 | mutex_lock(&dev->object_name_lock); |
3260 | Serge | 546 | obj = idr_find(&dev->object_name_idr, (int) args->name); |
4104 | Serge | 547 | if (obj) { |
3260 | Serge | 548 | drm_gem_object_reference(obj); |
4104 | Serge | 549 | } else { |
550 | mutex_unlock(&dev->object_name_lock); |
||
3260 | Serge | 551 | return -ENOENT; |
4104 | Serge | 552 | } |
3260 | Serge | 553 | |
4104 | Serge | 554 | /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ |
555 | ret = drm_gem_handle_create_tail(file_priv, obj, &handle); |
||
3260 | Serge | 556 | drm_gem_object_unreference_unlocked(obj); |
557 | if (ret) |
||
558 | return ret; |
||
559 | |||
560 | args->handle = handle; |
||
561 | args->size = obj->size; |
||
562 | |||
563 | return 0; |
||
564 | } |
||
565 | |||
/*
 * NOTE(review): this section is compiled out with #if 0 in this port;
 * kept for reference. It contains the per-file open/close bookkeeping.
 */
#if 0
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 *
 * idr_for_each() callback: drops prime handles, revokes the vma node,
 * runs the driver's close hook and drops the handle reference.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
#endif
||
613 | |||
614 | void |
||
615 | drm_gem_object_release(struct drm_gem_object *obj) |
||
616 | { |
||
617 | if (obj->filp) |
||
618 | free(obj->filp); |
||
619 | } |
||
620 | EXPORT_SYMBOL(drm_gem_object_release); |
||
621 | |||
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	/*
	 * NOTE(review): this cast assumes `refcount` is the first member
	 * of struct drm_gem_object; upstream uses container_of() here.
	 * Verify against the struct layout before relying on it.
	 */
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Actual freeing is delegated entirely to the driver. */
	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
||
640 | |||
641 | |||
/*
 * NOTE(review): disabled mmap vm_operations hooks, compiled out with
 * #if 0 in this port; kept for reference.
 */
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	/* Each vma mapping holds its own reference on the object. */
	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	/* Drop the reference taken in drm_gem_vm_open(). */
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif
||
668 |