/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
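/*
 * Layout sketch (illustrative only, restating the structs above): the
 * wrappers nest as
 *
 *	struct vmw_user_dma_buffer
 *	`-- struct vmw_dma_buffer dma
 *	    `-- struct ttm_buffer_object base
 *
 * which is what lets the two helpers above walk from a TTM buffer object
 * back to its enclosing wrappers with container_of().
 */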
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
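/*
 * Minimal usage sketch (illustrative only, not part of this file): callers
 * pair vmw_resource_reference() with vmw_resource_unreference(), which also
 * clears the caller's pointer.
 */
#if 0
	struct vmw_resource *tmp = vmw_resource_reference(res);

	/* ... the extra kref keeps the resource alive while it is used ... */

	vmw_resource_unreference(&tmp);	/* drops the kref and sets tmp = NULL */
#endif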
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
		res = NULL;

	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
/**
 * Buffer management.
 */
/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
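/*
 * Worked example (illustrative only, assuming 4 KiB pages and a 64-bit
 * kernel): a 1 MiB request gives num_pages = 256, so the page pointer array
 * accounts for 256 * sizeof(void *) = 2048 bytes, plus another
 * 256 * sizeof(dma_addr_t) = 2048 bytes under vmw_dma_alloc_coherent, on
 * top of the rounded struct and TTM backend sizes.
 */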
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;
	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}
/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 * @p_base: If non-NULL, points to where a refcounted pointer to the
 * underlying struct ttm_base_object should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf,
			  struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}
/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		if (nonblock)
			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}
/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
					     &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
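/*
 * Illustrative user-space sketch (not part of this file): bracketing direct
 * CPU access with the synccpu ioctl. Assumes the drm_vmw_synccpu_arg layout
 * from the vmwgfx_drm.h UAPI header and a DRM_IOCTL_VMW_SYNCCPU ioctl number
 * built from DRM_VMW_SYNCCPU, as the kernel side registers it.
 */
#if 0
	struct drm_vmw_synccpu_arg arg = {
		.op = drm_vmw_synccpu_grab,
		.handle = bo_handle,	/* handle from buffer allocation */
		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
	};

	drmIoctl(fd, DRM_IOCTL_VMW_SYNCCPU, &arg);	/* idle GPU, block submission */

	/* ... CPU reads/writes through the mmap'ed buffer ... */

	arg.op = drm_vmw_synccpu_release;
	drmIoctl(fd, DRM_IOCTL_VMW_SYNCCPU, &arg);	/* unblock submission */
#endif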
#if 0
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf,
				    NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
#endif
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out,
			   struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
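/*
 * Minimal lookup sketch (illustrative only): resolving a user-space handle
 * to a buffer and dropping both references when done, as the synccpu ioctl
 * above does. Passing a non-NULL p_base keeps the handle's backing base
 * object pinned while the buffer pointer is in use.
 */
#if 0
	struct vmw_dma_buffer *buf;
	struct ttm_base_object *base;

	if (vmw_user_dmabuf_lookup(tfile, handle, &buf, &base) == 0) {
		/* ... use buf ... */
		vmw_dmabuf_unreference(&buf);
		ttm_base_object_unref(&base);
	}
#endif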
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}
/*
 * Stream management
 */
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
#if 0
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");

		goto out_ret;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}
#endif
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf, NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
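/*
 * Illustrative user-space sketch (not part of this file): the dumb-buffer
 * path served by vmw_dumb_create() and vmw_dumb_map_offset(), using the
 * generic DRM dumb-buffer ioctls.
 */
#if 0
	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	/* -> vmw_dumb_create */
	map.handle = create.handle;
	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);		/* -> vmw_dumb_map_offset */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
#endif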
/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
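/*
 * Worked example (illustrative only, assuming 4 KiB pages): a backup_size
 * of 5000 bytes rounds up as (5000 + 4095) & ~4095 = 8192, i.e. two whole
 * pages, which is the granularity the backup buffer is allocated at.
 */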
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
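/*
 * Shape sketch (illustrative only, every name below hypothetical): a
 * guest-backed resource type supplies the hooks vmw_resource_do_validate()
 * drives. The in-file vmw_stream_func leaves them NULL because streams have
 * no hardware state to create, bind or back up.
 */
#if 0
static const struct vmw_res_func example_backed_func = {
	.res_type = vmw_res_surface,		/* hypothetical choice */
	.needs_backup = true,
	.may_evict = true,
	.type_name = "example surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = example_create,		/* allocate the device resource/id */
	.destroy = example_destroy,
	.bind = example_bind,			/* attach the validated backup buffer */
	.unbind = example_unbind,		/* read state back into the backup */
};
#endif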
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 *              switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_dmabuf_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
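/*
 * Minimal lifecycle sketch (illustrative only; in the real driver the
 * execbuf machinery also reserves the backup buffer between these calls):
 * how reserve, validate and unreserve are meant to be paired around
 * command submission.
 */
#if 0
	ret = vmw_resource_reserve(res, true, false);	/* off the LRU, backup allocated */
	if (ret)
		return ret;

	ret = vmw_resource_validate(res);		/* created/bound on the device */
	if (ret == 0) {
		/* ... emit commands referencing res->id ... */
	}

	vmw_resource_unreserve(res, false, NULL, 0);	/* back on the LRU */
#endif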
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources for %s.\n",
				  res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}
1467 | /** |
||
1468 | * vmw_resource_move_notify - TTM move_notify_callback |
||
1469 | * |
||
6296 | serge | 1470 | * @bo: The TTM buffer object about to move. |
1471 | * @mem: The struct ttm_mem_reg indicating to what memory |
||
1472 | * region the move is taking place. |
||
4075 | Serge | 1473 | * |
4569 | Serge | 1474 | * Evicts the Guest Backed hardware resource if the backup |
1475 | * buffer is being moved out of MOB memory. |
||
1476 | * Note that this function should not race with the resource |
||
1477 | * validation code as long as it accesses only members of struct |
||
1478 | * resource that remain static while bo::res is !NULL and |
||
1479 | * while we have @bo reserved. struct resource::backup is *not* a |
||
1480 | * static member. The resource validation code will take care |
||
1481 | * to set @bo::res to NULL, while having @bo reserved when the |
||
1482 | * buffer is no longer bound to the resource, so @bo:res can be |
||
1483 | * used to determine whether there is a need to unbind and whether |
||
1484 | * it is safe to unbind. |
||
4075 | Serge | 1485 | */ |
1486 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
||
1487 | struct ttm_mem_reg *mem) |
||
1488 | { |
||
6296 | serge | 1489 | /** |
1490 | * vmw_query_readback_all - Read back cached query states |
||
1491 | * |
||
1492 | * @dx_query_mob: Buffer containing the DX query MOB |
||
1493 | * |
||
1494 | * Read back cached states from the device if they exist. This function |
||
1495 | * assumings binding_mutex is held. |
||
1496 | */ |
||
1497 | int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) |
||
1498 | { |
||
1499 | struct vmw_resource *dx_query_ctx; |
||
1500 | struct vmw_private *dev_priv; |
||
1501 | struct { |
||
1502 | SVGA3dCmdHeader header; |
||
1503 | SVGA3dCmdDXReadbackAllQuery body; |
||
1504 | } *cmd; |
||
1505 | |||
1506 | |||
1507 | /* No query bound, so do nothing */ |
||
1508 | if (!dx_query_mob || !dx_query_mob->dx_query_ctx) |
||
1509 | return 0; |
||
1510 | |||
1511 | dx_query_ctx = dx_query_mob->dx_query_ctx; |
||
1512 | dev_priv = dx_query_ctx->dev_priv; |
||
1513 | |||
1514 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id); |
||
1515 | if (unlikely(cmd == NULL)) { |
||
1516 | DRM_ERROR("Failed reserving FIFO space for " |
||
1517 | "query MOB read back.\n"); |
||
1518 | return -ENOMEM; |
||
1519 | } |
||
1520 | |||
1521 | cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY; |
||
1522 | cmd->header.size = sizeof(cmd->body); |
||
1523 | cmd->body.cid = dx_query_ctx->id; |
||
1524 | |||
1525 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
||
1526 | |||
1527 | /* Triggers a rebind the next time affected context is bound */ |
||
1528 | dx_query_mob->dx_query_ctx = NULL; |
||
1529 | |||
1530 | return 0; |
||
4075 | Serge | 1531 | } |
6296 | serge | 1532 | } |
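
/*
 * Example (sketch, hypothetical helper): vmw_query_readback_all() assumes
 * binding_mutex is held, so a caller would wrap it like this.
 */
static inline int vmw_example_readback_queries(struct vmw_private *dev_priv,
					       struct vmw_dma_buffer *mob)
{
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_query_readback_all(mob);
	mutex_unlock(&dev_priv->binding_mutex);

	return ret;
}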

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			/* Put the resource back and retry; give up after
			 * too many failed evictions. */
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
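
/*
 * Example (sketch, hypothetical helper): a suspend path could use
 * vmw_resource_evict_all() to evict all registered guest-backed
 * resources and leave the OTable clean before hibernation. The function
 * takes cmdbuf_mutex internally, so it must not be held by the caller.
 */
static inline void vmw_example_prepare_hibernation(struct vmw_private *dev_priv)
{
	vmw_resource_evict_all(dev_priv);
}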

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to perform waits interruptibly.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_dma_buffer *vbo = NULL;

		/* First pin reference: make the backup buffer resident
		 * and validate the resource. */
		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, false,
				       NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
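
/*
 * Example (sketch, hypothetical helper): pair vmw_resource_pin() and
 * vmw_resource_unpin() around a use of the resource's hardware id. The
 * id is only guaranteed stable while the pin reference is held.
 */
static inline int vmw_example_use_pinned_id(struct vmw_resource *res,
					    int *id_out)
{
	int ret = vmw_resource_pin(res, true);

	if (ret)
		return ret;

	*id_out = res->id;	/* Stable for as long as the pin is held. */
	vmw_resource_unpin(res);

	return 0;
}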
||
1706 | |||
1707 | /** |
||
1708 | * vmw_res_type - Return the resource type |
||
1709 | * |
||
1710 | * @res: Pointer to the resource |
||
1711 | */ |
||
1712 | enum vmw_res_type vmw_res_type(const struct vmw_resource *res) |
||
1713 | { |
||
1714 | return res->func->res_type; |
||
1715 | }>>> |