drm_gem.c — diff between Rev 6935 and Rev 6937 (lines only in Rev 6935 are prefixed with "-", lines only in Rev 6937 with "+"; unchanged lines are shown once as context)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
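/*
 * Illustrative sketch, not part of either revision: the usual pattern for a
 * driver that embeds struct drm_gem_object in its own buffer-object type and
 * backs it with shmem via drm_gem_object_init().  The names "struct my_bo"
 * and my_bo_create() are hypothetical.
 */
#if 0
struct my_bo {
	struct drm_gem_object base;
	/* driver-private state would follow here */
};

static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
{
	struct my_bo *bo;
	int ret;

	/* drm_gem_object_init() BUG_ON()s on sizes that are not page aligned */
	size = PAGE_ALIGN(size);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &bo->base, size);
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	return bo;
}
#endif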
+
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+	/*
+	 * Note: obj->dma_buf can't disappear as long as we still hold a
+	 * handle reference in obj->handle_count.
+	 */
+}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}


static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
+	struct drm_device *dev = obj->dev;
+	bool final = false;
+
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

-	mutex_lock(&obj->dev->object_name_lock);
+	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
+		final = true;
	}
-	mutex_unlock(&obj->dev->object_name_lock);
+	mutex_unlock(&dev->object_name_lock);

-	drm_gem_object_unreference_unlocked(obj);
+	if (final)
+		drm_gem_object_unreference_unlocked(obj);
}
+
+/*
+ * Called at device or object close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+	struct drm_file *file_priv = data;
+	struct drm_gem_object *obj = ptr;
+	struct drm_device *dev = obj->dev;
+
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_gem_remove_prime_handles(obj, file_priv);
+//	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
+
+	drm_gem_object_handle_unreference_unlocked(obj);
+
+	return 0;
+}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
- * Removes the GEM handle from the @filp lookup table and if this is the last
- * handle also cleans up linked resources like GEM names.
+ * Removes the GEM handle from the @filp lookup table which has been added with
+ * drm_gem_handle_create(). If this is the last handle also cleans up linked
+ * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);
-
-//	drm_vma_node_revoke(&obj->vma_node, filp->filp);
-
-	if (dev->driver->gem_close_object)
-		dev->driver->gem_close_object(obj, filp);

-	drm_gem_object_handle_unreference_unlocked(obj);
+	drm_gem_object_release_handle(handle, obj, filp);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
+ *
+ * Handles must be released again through drm_gem_handle_delete(). This is done
+ * when userspace closes @file_priv for all attached handles, or through the
+ * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
+	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+	if (obj->handle_count++ == 0)
+		drm_gem_object_reference(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-	drm_gem_object_reference(obj);
-	obj->handle_count++;
+
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
+
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

-	*handlep = ret;
+	handle = ret;

//	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
//	if (ret) {
//		drm_gem_handle_delete(file_priv, *handlep);
//		return ret;
//	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

+	*handlep = handle;
	return 0;

err_revoke:
//	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove:
	spin_lock(&file_priv->table_lock);
-	idr_remove(&file_priv->object_idr, *handlep);
+	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
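/*
 * Illustrative sketch, not part of either revision: after creating an object a
 * driver typically publishes it to userspace with drm_gem_handle_create() and
 * then drops its own reference, so the handle becomes the only thing keeping
 * the object alive.  my_bo_create() and struct my_create_args are the
 * hypothetical names from the sketch above.
 */
#if 0
static int my_bo_create_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct my_create_args *args = data;	/* hypothetical ioctl payload */
	struct my_bo *bo;
	u32 handle;
	int ret;

	bo = my_bo_create(dev, args->size);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
	/* drop the creation reference; the handle now keeps the object alive */
	drm_gem_object_unreference_unlocked(&bo->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
#endif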

#if 0
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
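/*
 * Illustrative sketch, not part of either revision: a typical ->dumb_map_offset
 * implementation pairs drm_gem_create_mmap_offset() with
 * drm_vma_node_offset_addr() to hand the fake offset back to userspace.
 * my_dumb_map_offset() is a hypothetical name.
 */
#if 0
static int my_dumb_map_offset(struct drm_file *file_priv,
			      struct drm_device *dev,
			      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_create_mmap_offset(obj);
	if (!ret)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
#endif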

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = file_inode(obj->filp)->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
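/*
 * Illustrative sketch, not part of either revision: drm_gem_get_pages() and
 * drm_gem_put_pages() are used as a pair around whatever the driver does with
 * the pinned pages.  my_bo_access_pages() is a hypothetical name.
 */
#if 0
static int my_bo_access_pages(struct drm_gem_object *obj)
{
	struct page **pages;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... map, copy from or DMA to "pages" here ... */

	/* mark the pages dirty/accessed so shmem writes them back on swap-out */
	drm_gem_put_pages(obj, pages, true, true);
	return 0;
}
#endif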

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
#endif

-/** Returns a reference to the object named by the handle. */
+/**
+ * drm_gem_object_lookup - look up a GEM object from its handle
+ * @dev: DRM device
+ * @filp: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A reference to the object named by the handle if such exists on @filp, NULL
+ * otherwise.
+ */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
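/*
 * Illustrative sketch, not part of either revision: every successful
 * drm_gem_object_lookup() must be balanced with an unreference once the caller
 * is done with the object.  my_bo_query_size() is a hypothetical name.
 */
#if 0
static int my_bo_query_size(struct drm_device *dev, struct drm_file *file_priv,
			    u32 handle, u64 *size)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	*size = obj->size;

	drm_gem_object_unreference_unlocked(obj);
	return 0;
}
#endif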

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
-	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
-		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
-	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
+
+//	printf("%s object %p name %d refcount %d\n",
+//	       __FUNCTION__, obj, obj->name, obj->refcount.refcount);
+
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;
+
+//	printf("%s object %p handle %d refcount %d\n",
+//	       __FUNCTION__, obj, handle, obj->refcount.refcount);

	return 0;
}
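/*
 * Illustrative sketch, not part of either revision: how two userspace processes
 * would use the flink/open ioctls on a Linux/libdrm-style setup (assuming
 * <xf86drm.h>, <drm.h>, <stdio.h> and <stdint.h> are available).  Process A
 * names a buffer with GEM_FLINK and passes the 32-bit name to process B, which
 * turns it back into a handle private to its own fd with GEM_OPEN.
 * send_name_to_other_process() is a hypothetical transport.
 */
#if 0
static void process_a_export(int fd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink) == 0)
		send_name_to_other_process(flink.name);
}

static void process_b_import(int fd, uint32_t name)
{
	struct drm_gem_open open_arg = { .name = name };

	if (drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg) == 0)
		printf("handle %u, size %llu\n", open_arg.handle,
		       (unsigned long long)open_arg.size);
}
#endif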

#if 0
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
-
-/*
- * Called at device close to release the file's
- * handle references on objects.
- */
-static int
-drm_gem_object_release_handle(int id, void *ptr, void *data)
-{
-	struct drm_file *file_priv = data;
-	struct drm_gem_object *obj = ptr;
-	struct drm_device *dev = obj->dev;
-
-	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
-
-	if (dev->driver->gem_close_object)
-		dev->driver->gem_close_object(obj, file_priv);
-
-	drm_gem_object_handle_unreference_unlocked(obj);
-
-	return 0;
-}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
#endif

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);


#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif