Rev 3260 | Rev 3298 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
3260 | Serge | 1 | /* |
2 | * Copyright © 2008 Intel Corporation |
||
3 | * |
||
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
5 | * copy of this software and associated documentation files (the "Software"), |
||
6 | * to deal in the Software without restriction, including without limitation |
||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
||
8 | * and/or sell copies of the Software, and to permit persons to whom the |
||
9 | * Software is furnished to do so, subject to the following conditions: |
||
10 | * |
||
11 | * The above copyright notice and this permission notice (including the next |
||
12 | * paragraph) shall be included in all copies or substantial portions of the |
||
13 | * Software. |
||
14 | * |
||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
||
21 | * IN THE SOFTWARE. |
||
22 | * |
||
23 | * Authors: |
||
24 | * Eric Anholt |
||
25 | * |
||
26 | */ |
||
27 | |||
28 | #include |
||
29 | #include |
||
30 | #include |
||
31 | #include |
||
32 | #include |
||
33 | #include |
||
34 | #include |
||
35 | |||
36 | /** @file drm_gem.c |
||
37 | * |
||
38 | * This file provides some of the base ioctls and library routines for |
||
39 | * the graphics memory manager implemented by each device driver. |
||
40 | * |
||
41 | * Because various devices have different requirements in terms of |
||
42 | * synchronization and migration strategies, implementing that is left up to |
||
43 | * the driver, and all that the general API provides should be generic -- |
||
44 | * allocating objects, reading/writing data with the cpu, freeing objects. |
||
45 | * Even there, platform-dependent optimizations for reading/writing data with |
||
46 | * the CPU mean we'll likely hook those out to driver-specific calls. However, |
||
47 | * the DRI2 implementation wants to have at least allocate/mmap be generic. |
||
48 | * |
||
49 | * The goal was to have swap-backed object allocation managed through |
||
50 | * struct file. However, file descriptors as handles to a struct file have |
||
51 | * two major failings: |
||
52 | * - Process limits prevent more than 1024 or so being used at a time by |
||
53 | * default. |
||
54 | * - Inability to allocate high fds will aggravate the X Server's select() |
||
55 | * handling, and likely that of many GL client applications as well. |
||
56 | * |
||
57 | * This led to a plan of using our own integer IDs (called handles, following |
||
58 | * DRM terminology) to mimic fds, and implement the fd syscalls we need as |
||
59 | * ioctls. The objects themselves will still include the struct file so |
||
60 | * that we can transition to fds if the required kernel infrastructure shows |
||
61 | * up at a later date, and as our interface with shmfs for memory allocation. |
||
62 | */ |
||
63 | |||
64 | /* |
||
65 | * We make up offsets for buffer objects so we can recognize them at |
||
66 | * mmap time. |
||
67 | */ |
||
68 | |||
69 | /* pgoff in mmap is an unsigned long, so we need to make sure that |
||
70 | * the faked up offset will fit |
||
71 | */ |
||
72 | |||
#if BITS_PER_LONG == 64
/* 64-bit: start the faked offsets just above the 4GiB page range so they
 * cannot collide with ordinary mappings; window is 16x that range. */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
/* 32-bit: pgoff is an unsigned long, so use a smaller (256MiB-based)
 * window to keep the faked offsets representable. */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
||
80 | |||
81 | #if 0 |
||
82 | /** |
||
83 | * Initialize the GEM device fields |
||
84 | */ |
||
85 | |||
86 | int |
||
87 | drm_gem_init(struct drm_device *dev) |
||
88 | { |
||
89 | struct drm_gem_mm *mm; |
||
90 | |||
91 | spin_lock_init(&dev->object_name_lock); |
||
92 | idr_init(&dev->object_name_idr); |
||
93 | |||
94 | mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); |
||
95 | if (!mm) { |
||
96 | DRM_ERROR("out of memory\n"); |
||
97 | return -ENOMEM; |
||
98 | } |
||
99 | |||
100 | dev->mm_private = mm; |
||
101 | |||
102 | if (drm_ht_create(&mm->offset_hash, 12)) { |
||
103 | kfree(mm); |
||
104 | return -ENOMEM; |
||
105 | } |
||
106 | |||
107 | if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, |
||
108 | DRM_FILE_PAGE_OFFSET_SIZE)) { |
||
109 | drm_ht_remove(&mm->offset_hash); |
||
110 | kfree(mm); |
||
111 | return -ENOMEM; |
||
112 | } |
||
113 | |||
114 | return 0; |
||
115 | } |
||
116 | |||
/* Tear down the per-device GEM state created by drm_gem_init(). */
void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	/* Reverse creation order; kfree(mm) must come last since both
	 * structures live inside mm. */
	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}
||
127 | #endif |
||
128 | |||
129 | /** |
||
130 | * Initialize an already allocated GEM object of the specified size with |
||
131 | * shmfs backing store. |
||
132 | */ |
||
133 | int drm_gem_object_init(struct drm_device *dev, |
||
134 | struct drm_gem_object *obj, size_t size) |
||
135 | { |
||
136 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
||
137 | |||
138 | obj->dev = dev; |
||
139 | obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
||
140 | if (IS_ERR(obj->filp)) |
||
141 | return PTR_ERR(obj->filp); |
||
142 | |||
143 | kref_init(&obj->refcount); |
||
144 | atomic_set(&obj->handle_count, 0); |
||
145 | obj->size = size; |
||
146 | |||
147 | return 0; |
||
148 | } |
||
149 | EXPORT_SYMBOL(drm_gem_object_init); |
||
150 | |||
151 | /** |
||
152 | * Initialize an already allocated GEM object of the specified size with |
||
153 | * no GEM provided backing store. Instead the caller is responsible for |
||
154 | * backing the object and handling it. |
||
155 | */ |
||
156 | int drm_gem_private_object_init(struct drm_device *dev, |
||
157 | struct drm_gem_object *obj, size_t size) |
||
158 | { |
||
159 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
||
160 | |||
161 | obj->dev = dev; |
||
162 | obj->filp = NULL; |
||
163 | |||
164 | kref_init(&obj->refcount); |
||
165 | atomic_set(&obj->handle_count, 0); |
||
166 | obj->size = size; |
||
167 | |||
168 | return 0; |
||
169 | } |
||
170 | EXPORT_SYMBOL(drm_gem_private_object_init); |
||
171 | |||
172 | /** |
||
173 | * Allocate a GEM object of the specified size with shmfs backing store |
||
174 | */ |
||
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	/* Give the driver a chance to set up its private state. */
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Undo drm_gem_object_init(): drop the backing store again.
	 * NOTE(review): this port frees obj->filp directly where mainline
	 * uses fput(); assumes shmem_file_setup() here returns a plain
	 * allocation — confirm against the port's shmem implementation. */
	free(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
||
200 | |||
201 | |||
202 | /** |
||
203 | * Removes the mapping from handle to filp for this object. |
||
204 | */ |
||
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Debug trace left in by the port — fires on every handle close. */
	printf("%s handle %d obj %p\n", __FUNCTION__, handle, obj);

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	/* PRIME export bookkeeping is not wired up in this port. */
//	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	/* Drop the handle reference; may free the object. */
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
||
246 | |||
247 | /** |
||
248 | * Create a handle for this object. This adds a handle reference |
||
249 | * to the object, which includes a regular reference count. Callers |
||
250 | * will likely want to dereference the object afterwards. |
||
251 | */ |
||
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  The preallocate/allocate
	 * pair below can race with other allocators consuming the
	 * preallocated node, in which case idr_get_new_above() returns
	 * -EAGAIN and the whole sequence is retried.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;

	/* The new handle owns one handle reference (which itself pins a
	 * regular reference) on the object. */
	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Driver rejected the handle: tear it down again. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
||
290 | |||
291 | |||
292 | /** |
||
293 | * drm_gem_free_mmap_offset - release a fake mmap offset for an object |
||
294 | * @obj: obj in question |
||
295 | * |
||
296 | * This routine frees fake offsets allocated by drm_gem_create_mmap_offset(). |
||
297 | */ |
||
298 | #if 0 |
||
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	/* Tear down in reverse order of drm_gem_create_mmap_offset():
	 * hash entry, then the offset range, then the map allocation. */
	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
||
312 | |||
313 | /** |
||
314 | * drm_gem_create_mmap_offset - create a fake mmap offset for an object |
||
315 | * @obj: obj in question |
||
316 | * |
||
317 | * GEM memory mapping works by handing back to userspace a fake mmap offset |
||
318 | * it can use in a subsequent mmap(2) call. The DRM core code then looks |
||
319 | * up the object based on the offset and sets up the various memory mapping |
||
320 | * structures. |
||
321 | * |
||
322 | * This routine allocates and attaches a fake offset for @obj. |
||
323 | */ |
||
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	/* NOTE(review): allocates sizeof(struct drm_map_list) for what is
	 * used as a drm_local_map — this matches the historical upstream
	 * code, so at worst it over-allocates; confirm before "fixing". */
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	/* Claim the hole found above; can still fail under concurrency. */
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	/* Hash by start page so the mmap fault path can find the object. */
	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
||
379 | #endif |
||
380 | |||
381 | /** Returns a reference to the object named by the handle. */ |
||
382 | struct drm_gem_object * |
||
383 | drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, |
||
384 | u32 handle) |
||
385 | { |
||
386 | struct drm_gem_object *obj; |
||
387 | |||
388 | spin_lock(&filp->table_lock); |
||
389 | |||
390 | /* Check if we currently have a reference on the object */ |
||
391 | obj = idr_find(&filp->object_idr, handle); |
||
392 | if (obj == NULL) { |
||
393 | spin_unlock(&filp->table_lock); |
||
394 | return NULL; |
||
395 | } |
||
396 | |||
397 | drm_gem_object_reference(obj); |
||
398 | |||
399 | spin_unlock(&filp->table_lock); |
||
400 | |||
401 | return obj; |
||
402 | } |
||
403 | EXPORT_SYMBOL(drm_gem_object_lookup); |
||
404 | |||
405 | /** |
||
406 | * Releases the handle to an mm object. |
||
407 | */ |
||
408 | int |
||
409 | drm_gem_close_ioctl(struct drm_device *dev, void *data, |
||
410 | struct drm_file *file_priv) |
||
411 | { |
||
412 | struct drm_gem_close *args = data; |
||
413 | int ret; |
||
414 | |||
415 | ret = drm_gem_handle_delete(file_priv, args->handle); |
||
416 | |||
417 | return ret; |
||
418 | } |
||
419 | |||
420 | /** |
||
421 | * Create a global name for an object, returning the name. |
||
422 | * |
||
423 | * Note that the name does not hold a reference; when the object |
||
424 | * is freed, the name goes away. |
||
425 | */ |
||
426 | |||
427 | #if 0 |
||
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Takes a reference on the object; dropped at err below. */
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	/* Preallocate an idr node; retry the whole sequence on -EAGAIN. */
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		/* NOTE(review): args->name is copied before ret is checked,
		 * so on a hard idr failure userspace may briefly see name 0;
		 * harmless since the ioctl also errors, but verify. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		/* Already flinked: just report the existing name. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
||
473 | |||
474 | /** |
||
475 | * Open an object using the global name, returning a handle and the size. |
||
476 | * |
||
477 | * This handle (of course) holds a reference to the object, so the object |
||
478 | * will not go away until the handle is deleted. |
||
479 | */ |
||
480 | int |
||
481 | drm_gem_open_ioctl(struct drm_device *dev, void *data, |
||
482 | struct drm_file *file_priv) |
||
483 | { |
||
484 | struct drm_gem_open *args = data; |
||
485 | struct drm_gem_object *obj; |
||
486 | int ret; |
||
487 | u32 handle; |
||
488 | |||
489 | if (!(dev->driver->driver_features & DRIVER_GEM)) |
||
490 | return -ENODEV; |
||
491 | |||
492 | spin_lock(&dev->object_name_lock); |
||
493 | obj = idr_find(&dev->object_name_idr, (int) args->name); |
||
494 | if (obj) |
||
495 | drm_gem_object_reference(obj); |
||
496 | spin_unlock(&dev->object_name_lock); |
||
497 | if (!obj) |
||
498 | return -ENOENT; |
||
499 | |||
500 | ret = drm_gem_handle_create(file_priv, obj, &handle); |
||
501 | drm_gem_object_unreference_unlocked(obj); |
||
502 | if (ret) |
||
503 | return ret; |
||
504 | |||
505 | args->handle = handle; |
||
506 | args->size = obj->size; |
||
507 | |||
508 | return 0; |
||
509 | } |
||
510 | |||
511 | /** |
||
512 | * Called at device open time, sets up the structure for handling refcounting |
||
513 | * of mm objects. |
||
514 | */ |
||
515 | void |
||
516 | drm_gem_open(struct drm_device *dev, struct drm_file *file_private) |
||
517 | { |
||
518 | idr_init(&file_private->object_idr); |
||
519 | spin_lock_init(&file_private->table_lock); |
||
520 | } |
||
521 | |||
522 | /** |
||
523 | * Called at device close to release the file's |
||
524 | * handle references on objects. |
||
525 | */ |
||
/* idr_for_each() callback: release one handle's references at file close. */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	/* Drop the handle reference; may free the object. */
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
||
542 | |||
543 | /** |
||
544 | * Called at close time when the filp is going away. |
||
545 | * |
||
546 | * Releases any remaining references on objects by this filp. |
||
547 | */ |
||
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	/* Release every handle this file still holds, then tear down the
	 * now-empty idr itself. */
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}
||
557 | #endif |
||
558 | |||
/* Release the object's backing store (private objects have filp == NULL). */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	/* NOTE(review): this port frees the filp directly where mainline
	 * calls fput(); the NULL guard is needed only if this free() is
	 * not NULL-safe — confirm against the port's allocator. */
	if (obj->filp)
		free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
||
566 | |||
567 | /** |
||
568 | * Called after the last reference to the object has been lost. |
||
569 | * Must be called holding struct_ mutex |
||
570 | * |
||
571 | * Frees the object |
||
572 | */ |
||
void
drm_gem_object_free(struct kref *kref)
{
	/* NOTE(review): this cast assumes refcount is the first member of
	 * struct drm_gem_object; mainline uses container_of() instead —
	 * confirm the layout or switch to container_of. */
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	/* Caller must hold struct_mutex (see the comment above). */
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
||
585 | |||
/* kref release stub: the name table's reference must never be the last
 * one (a live handle always holds another), so reaching here is a bug. */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
||
590 | |||
591 | /** |
||
592 | * Called after the last handle to the object has been closed |
||
593 | * |
||
594 | * Removes any name for the object. Note that this must be |
||
595 | * called before drm_gem_object_free or we'll be touching |
||
596 | * freed memory |
||
597 | */ |
||
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);
||
620 | |||
621 | #if 0 |
||
622 | void drm_gem_vm_open(struct vm_area_struct *vma) |
||
623 | { |
||
624 | struct drm_gem_object *obj = vma->vm_private_data; |
||
625 | |||
626 | drm_gem_object_reference(obj); |
||
627 | |||
628 | mutex_lock(&obj->dev->struct_mutex); |
||
629 | drm_vm_open_locked(obj->dev, vma); |
||
630 | mutex_unlock(&obj->dev->struct_mutex); |
||
631 | } |
||
632 | EXPORT_SYMBOL(drm_gem_vm_open); |
||
633 | |||
634 | void drm_gem_vm_close(struct vm_area_struct *vma) |
||
635 | { |
||
636 | struct drm_gem_object *obj = vma->vm_private_data; |
||
637 | struct drm_device *dev = obj->dev; |
||
638 | |||
639 | mutex_lock(&dev->struct_mutex); |
||
640 | drm_vm_close_locked(obj->dev, vma); |
||
641 | drm_gem_object_unreference(obj); |
||
642 | mutex_unlock(&dev->struct_mutex); |
||
643 | } |
||
644 | EXPORT_SYMBOL(drm_gem_vm_close); |
||
645 | |||
646 | #endif |
||
647 |