/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *
 */

#include
#include
#include
#include
#include
#include
#include
#include

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
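
/*
 * Illustrative sketch (not part of this file): how a driver built on these
 * helpers typically allocates a shmfs-backed object and hands a handle back
 * to userspace. The function name "my_driver_create_bo" and the surrounding
 * error handling are assumptions made for the example; the helpers used are
 * the ones defined below.
 *
 *	int my_driver_create_bo(struct drm_device *dev, struct drm_file *file,
 *				size_t size, u32 *handle)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_alloc(dev, PAGE_ALIGN(size));
 *		if (obj == NULL)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_handle_create(file, obj, handle);
 *		// drop the allocation reference; the handle reference now
 *		// keeps the object alive
 *		drm_gem_object_unreference_unlocked(obj);
 *
 *		return ret;
 *	}
 */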

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
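
/*
 * Worked example (assuming the common PAGE_SHIFT of 12, i.e. 4 KiB pages):
 * on 64-bit, START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages, so the fake
 * offsets begin at the 4 GiB mark and span roughly 16M pages (~64 GiB);
 * on 32-bit, START = (0xFFFFFFF >> 12) + 1 = 0x10000 pages (256 MiB) with a
 * span of roughly 1M pages (~4 GiB). Either way the whole range still fits
 * in an unsigned long pgoff while staying clear of ordinary mapping offsets.
 */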

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
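
/*
 * Illustrative sketch (not part of this file): a driver that manages its own
 * backing storage typically embeds the GEM object in its own buffer struct
 * and uses the "private" initializer above. The "my_bo" type and the idea of
 * carved-out memory are assumptions made for the example.
 *
 *	struct my_bo {
 *		struct drm_gem_object base;
 *		void *backing;          // e.g. stolen/carved-out memory
 *	};
 *
 *	struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (!bo)
 *		return -ENOMEM;
 *	drm_gem_private_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *	// the driver, not GEM/shmfs, now owns bo->backing
 */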

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	free(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	}
}


static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);


	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * Creates a handle for this object. This adds a handle reference to the
 * object, which includes a regular reference count. Callers will likely
 * want to dereference the object afterwards.
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size). Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
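
/*
 * Sketch of the usual lookup pattern in a driver ioctl (illustrative only;
 * "my_args" stands in for the ioctl argument struct): the lookup takes a
 * reference, so the caller must drop it again when done with the object.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, my_args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// ... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */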

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
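
/*
 * Userspace view of the flink/open pair (illustrative sketch; "fd_a" and
 * "fd_b" are two hypothetical file descriptors opened on the same device,
 * and "handle_a" is an existing handle on fd_a):
 *
 *	struct drm_gem_flink flink = { .handle = handle_a };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);   // publish a global name
 *
 *	struct drm_gem_open open = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open);     // import it elsewhere
 *	// open.handle and open.size now describe the shared object on fd_b
 */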

#if 0
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
#endif

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);


#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif