Rev 3290 | Rev 3480 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
3260 | Serge | 1 | /* |
2 | * Copyright © 2008 Intel Corporation |
||
3 | * |
||
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
5 | * copy of this software and associated documentation files (the "Software"), |
||
6 | * to deal in the Software without restriction, including without limitation |
||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
||
8 | * and/or sell copies of the Software, and to permit persons to whom the |
||
9 | * Software is furnished to do so, subject to the following conditions: |
||
10 | * |
||
11 | * The above copyright notice and this permission notice (including the next |
||
12 | * paragraph) shall be included in all copies or substantial portions of the |
||
13 | * Software. |
||
14 | * |
||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
||
21 | * IN THE SOFTWARE. |
||
22 | * |
||
23 | * Authors: |
||
24 | * Eric Anholt |
||
25 | * |
||
26 | */ |
||
27 | |||
28 | #include |
||
29 | #include |
||
30 | #include |
||
31 | #include |
||
32 | #include |
||
33 | #include |
||
34 | #include |
||
35 | |||
36 | /** @file drm_gem.c |
||
37 | * |
||
38 | * This file provides some of the base ioctls and library routines for |
||
39 | * the graphics memory manager implemented by each device driver. |
||
40 | * |
||
41 | * Because various devices have different requirements in terms of |
||
42 | * synchronization and migration strategies, implementing that is left up to |
||
43 | * the driver, and all that the general API provides should be generic -- |
||
44 | * allocating objects, reading/writing data with the cpu, freeing objects. |
||
45 | * Even there, platform-dependent optimizations for reading/writing data with |
||
46 | * the CPU mean we'll likely hook those out to driver-specific calls. However, |
||
47 | * the DRI2 implementation wants to have at least allocate/mmap be generic. |
||
48 | * |
||
49 | * The goal was to have swap-backed object allocation managed through |
||
50 | * struct file. However, file descriptors as handles to a struct file have |
||
51 | * two major failings: |
||
52 | * - Process limits prevent more than 1024 or so being used at a time by |
||
53 | * default. |
||
54 | * - Inability to allocate high fds will aggravate the X Server's select() |
||
55 | * handling, and likely that of many GL client applications as well. |
||
56 | * |
||
57 | * This led to a plan of using our own integer IDs (called handles, following |
||
58 | * DRM terminology) to mimic fds, and implement the fd syscalls we need as |
||
59 | * ioctls. The objects themselves will still include the struct file so |
||
60 | * that we can transition to fds if the required kernel infrastructure shows |
||
61 | * up at a later date, and as our interface with shmfs for memory allocation. |
||
62 | */ |
||
63 | |||
/*
 * We make up fake mmap offsets for buffer objects so that we can recognize
 * them at mmap time.
 *
 * pgoff in mmap(2) is an unsigned long, so the faked-up offset range must
 * fit in one: on 64-bit we park it just above the 4 GiB mark, on 32-bit
 * just above 256 MiB.
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
||
80 | |||
#if 0
/**
 * Initialize the GEM fields of a DRM device: the global name idr/lock and
 * the fake-mmap-offset manager hung off dev->mm_private.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.  On failure
 * dev->mm_private is left NULL (previously it could be left dangling at
 * the freed drm_gem_mm, which drm_gem_destroy() would then double-free).
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		/* Don't leave a dangling pointer for drm_gem_destroy(). */
		dev->mm_private = NULL;
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		dev->mm_private = NULL;
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

/**
 * Tear down what drm_gem_init() set up: the offset manager, the offset
 * hash, and the drm_gem_mm itself.
 */
void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}
#endif
||
128 | |||
129 | /** |
||
130 | * Initialize an already allocated GEM object of the specified size with |
||
131 | * shmfs backing store. |
||
132 | */ |
||
133 | int drm_gem_object_init(struct drm_device *dev, |
||
134 | struct drm_gem_object *obj, size_t size) |
||
135 | { |
||
136 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
||
137 | |||
138 | obj->dev = dev; |
||
139 | obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
||
140 | if (IS_ERR(obj->filp)) |
||
141 | return PTR_ERR(obj->filp); |
||
142 | |||
143 | kref_init(&obj->refcount); |
||
144 | atomic_set(&obj->handle_count, 0); |
||
145 | obj->size = size; |
||
146 | |||
147 | return 0; |
||
148 | } |
||
149 | EXPORT_SYMBOL(drm_gem_object_init); |
||
150 | |||
151 | /** |
||
152 | * Initialize an already allocated GEM object of the specified size with |
||
153 | * no GEM provided backing store. Instead the caller is responsible for |
||
154 | * backing the object and handling it. |
||
155 | */ |
||
156 | int drm_gem_private_object_init(struct drm_device *dev, |
||
157 | struct drm_gem_object *obj, size_t size) |
||
158 | { |
||
159 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
||
160 | |||
161 | obj->dev = dev; |
||
162 | obj->filp = NULL; |
||
163 | |||
164 | kref_init(&obj->refcount); |
||
165 | atomic_set(&obj->handle_count, 0); |
||
166 | obj->size = size; |
||
167 | |||
168 | return 0; |
||
169 | } |
||
170 | EXPORT_SYMBOL(drm_gem_private_object_init); |
||
171 | |||
172 | /** |
||
173 | * Allocate a GEM object of the specified size with shmfs backing store |
||
174 | */ |
||
175 | struct drm_gem_object * |
||
176 | drm_gem_object_alloc(struct drm_device *dev, size_t size) |
||
177 | { |
||
178 | struct drm_gem_object *obj; |
||
179 | |||
180 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
||
181 | if (!obj) |
||
182 | goto free; |
||
183 | |||
184 | if (drm_gem_object_init(dev, obj, size) != 0) |
||
185 | goto free; |
||
186 | |||
187 | if (dev->driver->gem_init_object != NULL && |
||
188 | dev->driver->gem_init_object(obj) != 0) { |
||
189 | goto fput; |
||
190 | } |
||
191 | return obj; |
||
192 | fput: |
||
193 | /* Object_init mangles the global counters - readjust them. */ |
||
194 | free(obj->filp); |
||
195 | free: |
||
196 | kfree(obj); |
||
197 | return NULL; |
||
198 | } |
||
199 | EXPORT_SYMBOL(drm_gem_object_alloc); |
||
200 | |||
201 | |||
202 | /** |
||
203 | * Removes the mapping from handle to filp for this object. |
||
204 | */ |
||
205 | int |
||
206 | drm_gem_handle_delete(struct drm_file *filp, u32 handle) |
||
207 | { |
||
208 | struct drm_device *dev; |
||
209 | struct drm_gem_object *obj; |
||
210 | |||
211 | /* This is gross. The idr system doesn't let us try a delete and |
||
212 | * return an error code. It just spews if you fail at deleting. |
||
213 | * So, we have to grab a lock around finding the object and then |
||
214 | * doing the delete on it and dropping the refcount, or the user |
||
215 | * could race us to double-decrement the refcount and cause a |
||
216 | * use-after-free later. Given the frequency of our handle lookups, |
||
217 | * we may want to use ida for number allocation and a hash table |
||
218 | * for the pointers, anyway. |
||
219 | */ |
||
220 | spin_lock(&filp->table_lock); |
||
221 | |||
222 | /* Check if we currently have a reference on the object */ |
||
223 | obj = idr_find(&filp->object_idr, handle); |
||
224 | if (obj == NULL) { |
||
225 | spin_unlock(&filp->table_lock); |
||
226 | return -EINVAL; |
||
227 | } |
||
228 | dev = obj->dev; |
||
229 | |||
3298 | Serge | 230 | // printf("%s handle %d obj %p\n", __FUNCTION__, handle, obj); |
3290 | Serge | 231 | |
3260 | Serge | 232 | /* Release reference and decrement refcount. */ |
233 | idr_remove(&filp->object_idr, handle); |
||
234 | spin_unlock(&filp->table_lock); |
||
235 | |||
236 | // drm_gem_remove_prime_handles(obj, filp); |
||
237 | |||
238 | if (dev->driver->gem_close_object) |
||
239 | dev->driver->gem_close_object(obj, filp); |
||
240 | drm_gem_object_handle_unreference_unlocked(obj); |
||
241 | |||
242 | return 0; |
||
243 | } |
||
244 | EXPORT_SYMBOL(drm_gem_handle_delete); |
||
245 | |||
246 | /** |
||
247 | * Create a handle for this object. This adds a handle reference |
||
248 | * to the object, which includes a regular reference count. Callers |
||
249 | * will likely want to dereference the object afterwards. |
||
250 | */ |
||
251 | int |
||
252 | drm_gem_handle_create(struct drm_file *file_priv, |
||
253 | struct drm_gem_object *obj, |
||
254 | u32 *handlep) |
||
255 | { |
||
256 | struct drm_device *dev = obj->dev; |
||
257 | int ret; |
||
258 | |||
259 | /* |
||
260 | * Get the user-visible handle using idr. |
||
261 | */ |
||
262 | again: |
||
263 | /* ensure there is space available to allocate a handle */ |
||
264 | if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) |
||
265 | return -ENOMEM; |
||
266 | |||
267 | /* do the allocation under our spinlock */ |
||
268 | spin_lock(&file_priv->table_lock); |
||
269 | ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep); |
||
270 | spin_unlock(&file_priv->table_lock); |
||
271 | if (ret == -EAGAIN) |
||
272 | goto again; |
||
273 | else if (ret) |
||
274 | return ret; |
||
275 | |||
276 | drm_gem_object_handle_reference(obj); |
||
277 | |||
278 | if (dev->driver->gem_open_object) { |
||
279 | ret = dev->driver->gem_open_object(obj, file_priv); |
||
280 | if (ret) { |
||
281 | drm_gem_handle_delete(file_priv, *handlep); |
||
282 | return ret; |
||
283 | } |
||
284 | } |
||
285 | |||
286 | return 0; |
||
287 | } |
||
288 | EXPORT_SYMBOL(drm_gem_handle_create); |
||
289 | |||
290 | |||
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 * Returns 0 on success, -ENOMEM/-ENOSPC on failure; on failure nothing
 * remains attached to @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing.  list->map points at a
	 * drm_local_map, so size the allocation from the pointee rather
	 * than over-allocating a whole drm_map_list as before.
	 */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(*list->map), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	/* Hash the offset so mmap can find the object again. */
	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
#endif
||
379 | |||
380 | /** Returns a reference to the object named by the handle. */ |
||
381 | struct drm_gem_object * |
||
382 | drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, |
||
383 | u32 handle) |
||
384 | { |
||
385 | struct drm_gem_object *obj; |
||
386 | |||
387 | spin_lock(&filp->table_lock); |
||
388 | |||
389 | /* Check if we currently have a reference on the object */ |
||
390 | obj = idr_find(&filp->object_idr, handle); |
||
391 | if (obj == NULL) { |
||
392 | spin_unlock(&filp->table_lock); |
||
393 | return NULL; |
||
394 | } |
||
395 | |||
396 | drm_gem_object_reference(obj); |
||
397 | |||
398 | spin_unlock(&filp->table_lock); |
||
399 | |||
400 | return obj; |
||
401 | } |
||
402 | EXPORT_SYMBOL(drm_gem_object_lookup); |
||
403 | |||
404 | /** |
||
405 | * Releases the handle to an mm object. |
||
406 | */ |
||
407 | int |
||
408 | drm_gem_close_ioctl(struct drm_device *dev, void *data, |
||
409 | struct drm_file *file_priv) |
||
410 | { |
||
411 | struct drm_gem_close *args = data; |
||
412 | int ret; |
||
413 | |||
414 | ret = drm_gem_handle_delete(file_priv, args->handle); |
||
415 | |||
416 | return ret; |
||
417 | } |
||
418 | |||
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */

#if 0
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	/* Legacy idr API: reserve memory, then allocate under the lock,
	 * retrying on the pre_get/get_new race.
	 */
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		/* Already named: just report the existing name. */
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret == 0) {
		/* Fill in the reply while we still hold our lookup
		 * reference — previously obj->size was read after the
		 * unreference below, relying on the handle reference to
		 * keep the object alive.
		 */
		args->handle = handle;
		args->size = obj->size;
	}
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.  idr_for_each callback: drops one
 * handle's worth of references for each entry still in the table.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}
#endif
||
557 | |||
558 | void |
||
559 | drm_gem_object_release(struct drm_gem_object *obj) |
||
560 | { |
||
561 | if (obj->filp) |
||
562 | free(obj->filp); |
||
563 | } |
||
564 | EXPORT_SYMBOL(drm_gem_object_release); |
||
565 | |||
566 | /** |
||
567 | * Called after the last reference to the object has been lost. |
||
568 | * Must be called holding struct_ mutex |
||
569 | * |
||
570 | * Frees the object |
||
571 | */ |
||
572 | void |
||
573 | drm_gem_object_free(struct kref *kref) |
||
574 | { |
||
575 | struct drm_gem_object *obj = (struct drm_gem_object *) kref; |
||
576 | struct drm_device *dev = obj->dev; |
||
577 | |||
578 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
||
579 | |||
580 | if (dev->driver->gem_free_object != NULL) |
||
581 | dev->driver->gem_free_object(obj); |
||
582 | } |
||
583 | EXPORT_SYMBOL(drm_gem_object_free); |
||
584 | |||
/* kref release callback that must never fire: used where another
 * reference (the handle's) is guaranteed to outlive the one being put.
 */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
||
589 | |||
590 | /** |
||
591 | * Called after the last handle to the object has been closed |
||
592 | * |
||
593 | * Removes any name for the object. Note that this must be |
||
594 | * called before drm_gem_object_free or we'll be touching |
||
595 | * freed memory |
||
596 | */ |
||
597 | void drm_gem_object_handle_free(struct drm_gem_object *obj) |
||
598 | { |
||
599 | struct drm_device *dev = obj->dev; |
||
600 | |||
601 | /* Remove any name for this object */ |
||
602 | spin_lock(&dev->object_name_lock); |
||
603 | if (obj->name) { |
||
604 | idr_remove(&dev->object_name_idr, obj->name); |
||
605 | obj->name = 0; |
||
606 | spin_unlock(&dev->object_name_lock); |
||
607 | /* |
||
608 | * The object name held a reference to this object, drop |
||
609 | * that now. |
||
610 | * |
||
611 | * This cannot be the last reference, since the handle holds one too. |
||
612 | */ |
||
613 | kref_put(&obj->refcount, drm_gem_object_ref_bug); |
||
614 | } else |
||
615 | spin_unlock(&dev->object_name_lock); |
||
616 | |||
617 | } |
||
618 | EXPORT_SYMBOL(drm_gem_object_handle_free); |
||
619 | |||
#if 0
/* VMA open: a new mapping of the object exists, so take an object
 * reference and account the mapping under struct_mutex.
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/* VMA close: drop the mapping's accounting and the reference taken in
 * drm_gem_vm_open(), all under struct_mutex.
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif
||
646 |