/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}


/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                res->hw_destroy(res);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_context_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

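/*
 * Illustrative sketch (not part of the original file): typical use of
 * vmw_user_resource_lookup_handle() from an ioctl path. The converter
 * (user_surface_converter here) both type-checks the handle and maps the
 * TTM base object to its vmw_resource; the caller owns the returned
 * reference and must drop it with vmw_resource_unreference().
 *
 *      struct vmw_resource *res;
 *      int ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                                user_surface_converter,
 *                                                &res);
 *      if (unlikely(ret != 0))
 *              return ret;     (-EINVAL: unknown handle or wrong type)
 *      ... use res ...
 *      vmw_resource_unreference(&res);
 */
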
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

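/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * a 1 MiB buffer has num_pages = 256, so the page pointer array accounts
 * for ttm_round_pot(256 * 8) = 2 KiB, plus another 2 KiB for the
 * dma_addr_t array in the vmw_dma_alloc_coherent case, on top of the
 * rounded struct and TTM backend sizes cached in the static variables
 * above.
 */
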
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

//      ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
                                            enum ttm_ref_type ref_type)
{
        struct vmw_user_dma_buffer *user_bo;
        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
/*
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release,
                                    &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }
*/

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

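/*
 * Illustrative sketch (assumption, mirroring vmw_dmabuf_alloc_ioctl()
 * further down): a caller receives both a TTM handle for user space and
 * a refcounted buffer pointer that it must drop when done.
 *
 *      uint32_t handle;
 *      struct vmw_dma_buffer *dma_buf;
 *      int ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *                                      &handle, &dma_buf);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ... hand "handle" back to user space ...
 *      vmw_dmabuf_unreference(&dma_buf);
 */
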
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->dma.base;
        bool existed;
        int ret = 0;    /* the GPU waits below are stubbed out in this port */

        if (flags & drm_vmw_synccpu_allow_cs) {
                struct ttm_bo_device *bdev = bo->bdev;

//              spin_lock(&bdev->fence_lock);
//              ret = ttm_bo_wait(bo, false, true,
//                                !!(flags & drm_vmw_synccpu_dontblock));
//              spin_unlock(&bdev->fence_lock);
                return ret;
        }

//      ret = ttm_bo_synccpu_write_grab
//              (bo, !!(flags & drm_vmw_synccpu_dontblock));
//      if (unlikely(ret != 0))
//              return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed);
//      if (ret != 0 || existed)
//              ttm_bo_synccpu_write_release(&user_bo->dma.base);

        return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
                                       dma);
                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_dmabuf_unreference(&dma_buf);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
                                                      arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

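/*
 * Illustrative user-space sketch (assumption; the ioctl number macro is
 * the usual libdrm-style construction, not defined in this file):
 * grabbing a buffer for CPU writes and releasing it again.
 *
 *      struct drm_vmw_synccpu_arg arg = {
 *              .handle = bo_handle,
 *              .op = drm_vmw_synccpu_grab,
 *              .flags = drm_vmw_synccpu_write,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_VMW_SYNCCPU, &arg);  (idle GPU, block CS)
 *      ... CPU access through the mmap'ed buffer ...
 *      arg.op = drm_vmw_synccpu_release;
 *      drmIoctl(fd, DRM_IOCTL_VMW_SYNCCPU, &arg);  (unblock submission)
 */
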
#if 0
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}
#endif

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

746 | |||
747 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
||
4569 | Serge | 748 | struct vmw_dma_buffer *dma_buf, |
749 | uint32_t *handle) |
||
4075 | Serge | 750 | { |
751 | struct vmw_user_dma_buffer *user_bo; |
||
752 | |||
753 | if (dma_buf->base.destroy != vmw_user_dmabuf_destroy) |
||
754 | return -EINVAL; |
||
755 | |||
756 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); |
||
4569 | Serge | 757 | |
758 | *handle = user_bo->prime.base.hash.key; |
||
759 | return ttm_ref_object_add(tfile, &user_bo->prime.base, |
||
760 | TTM_REF_USAGE, NULL); |
||
4075 | Serge | 761 | } |
762 | |||
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

#if 0
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;


        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }


        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
#endif

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}

#if 0
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
#endif

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

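/*
 * Illustrative user-space sketch (assumption, standard DRM dumb-buffer
 * UAPI): DRM_IOCTL_MODE_CREATE_DUMB yields the handle, and the map_dumb
 * ioctl routed to vmw_dumb_map_offset() yields the fake offset to pass
 * to mmap().
 *
 *      struct drm_mode_create_dumb create = {
 *              .width = w, .height = h, .bpp = 32,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *      struct drm_mode_map_dumb map = { .handle = create.handle };
 *      drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *      void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map.offset);
 */
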
/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                lockdep_assert_held(&new_backup->base.resv->lock.base);
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

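/*
 * Illustrative sketch (not from the original file): the reserve /
 * validate / unreserve cycle around command submission. The backup
 * buffer, if any, must be reserved by the caller before
 * vmw_resource_validate() (defined below) is called.
 *
 *      ret = vmw_resource_reserve(res, false);  (off LRU, ensure backup)
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ... reserve res->backup, then ...
 *      ret = vmw_resource_validate(res);  (create/bind, evict on -EBUSY)
 *      ... submit commands, fence buffers ...
 *      vmw_resource_unreserve(res, NULL, 0);  (back on LRU if evictable)
 */
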
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct vmw_fence_obj *old_fence_obj;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                driver->sync_obj_ref(fence);

        spin_lock(&bdev->fence_lock);

        old_fence_obj = bo->sync_obj;
        bo->sync_obj = fence;

        spin_unlock(&bdev->fence_lock);

        if (old_fence_obj)
                vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo:res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
        /* No-op in this port. */
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}