/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *
 */

/*
 * Header names were lost in extraction; this list is reconstructed from the
 * symbols the file actually uses (kzalloc, idr, shmem_file_setup, IS_ERR,
 * page_cache_release, EXPORT_SYMBOL, the drm_vma manager, etc.).
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

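/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12, i.e. 4 KiB pages):
 * on 64-bit, DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 = 0x100000
 * pages, so the fake offsets begin at the 4 GiB byte boundary, above any
 * real file offset, and DRM_FILE_PAGE_OFFSET_SIZE reserves 16 times that
 * range (roughly 64 GiB of offset space) for buffer objects.
 */
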
/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

/**
 * drm_gem_destroy - tear down the GEM device fields
 * @dev: drm_device being torn down
 */
void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

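/*
 * Usage sketch (illustrative only; "foo_bo" is a made-up driver type, not
 * part of this API): a driver typically embeds struct drm_gem_object in
 * its own buffer-object structure and gives it a page-aligned shmfs
 * backing store:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	int ret = drm_gem_object_init(dev, &bo->base,
 *				      roundup(size, PAGE_SIZE));
 *	if (ret)
 *		kfree(bo);
 */
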
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

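/*
 * Usage sketch (illustrative; "bo" and "attach" are hypothetical driver
 * variables): the private variant fits objects whose pages come from
 * somewhere other than shmfs, e.g. a dma-buf import where the exporter
 * owns the backing store:
 *
 *	drm_gem_private_object_init(dev, &bo->base, attach->dmabuf->size);
 */
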
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

//	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for GEM-based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

//	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
//	if (ret) {
//		drm_gem_handle_delete(file_priv, *handlep);
//		return ret;
//	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

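/*
 * Usage sketch (illustrative): a typical object-creation ioctl publishes
 * the object to userspace and then drops its own reference, leaving the
 * new handle as the only thing keeping the object alive:
 *
 *	u32 handle;
 *	int ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */
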
#if 0
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size). Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

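/*
 * Usage sketch (illustrative): a driver's "map" ioctl attaches a fake
 * offset and hands the byte offset back so userspace can pass it to
 * mmap(2); drm_vma_node_offset_addr() converts the page-based node start
 * into that byte offset:
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 */
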
/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = file_inode(obj->filp)->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
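
/*
 * Usage sketch (illustrative): pin the backing pages while the hardware
 * uses the buffer, then release them, marking them dirty if they were
 * written:
 *
 *	struct page **pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... map the pages for the device ...
 *	drm_gem_put_pages(obj, pages, true, false);
 */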
#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

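/*
 * Usage sketch (illustrative): every successful lookup takes a reference
 * that the caller must drop once it is done with the object:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	... use obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */
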
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

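/*
 * Flow sketch (illustrative): flink/open is how two processes share one
 * buffer through a global name. Process A names its handle, process B
 * turns that name back into a handle of its own:
 *
 *	A: GEM_FLINK(handle) -> name         (drm_gem_flink_ioctl)
 *	B: GEM_OPEN(name)    -> handle, size (drm_gem_open_ioctl)
 *
 * Only B's new handle holds a reference; the name itself keeps nothing
 * alive.
 */
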
#if 0
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
#endif

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * Releases the resources held by @obj; the inverse of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

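/*
 * Usage sketch (illustrative; "foo_gem_free_object" is a made-up driver
 * callback): drivers hook gem_free_object to tear down their private
 * state and then release the common fields:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		... free driver-private resources ...
 *		drm_gem_object_release(obj);
 *		kfree(obj);
 *	}
 */
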
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif