/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

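/*
 * Illustrative sketch (editor's assumption, not part of this listing):
 * VMW_CMD_DEF populates a dispatch table indexed by command id, which the
 * verifier consults per command header. A minimal example of such a table,
 * using handlers defined further down in this file:
 */
#if 0
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
};
#endif
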
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}
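
/*
 * Illustrative sketch (editor's note, not part of the driver source):
 * vmw_resource_val_add() is a lookup-or-insert on the per-submission hash
 * table, so adding the same resource twice is cheap and yields the same
 * node, with first_usage flipped on the repeat:
 */
#if 0
	struct vmw_resource_val_node *n1, *n2;

	vmw_resource_val_add(sw_context, res, &n1); /* n1->first_usage == true */
	vmw_resource_val_add(sw_context, res, &n2); /* n2 == n1, first_usage false */
#endif
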
/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to,
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}
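
/*
 * Illustrative sketch (editor's note, not part of the driver source): how
 * the relocation helpers above cooperate. While parsing, the offset of a
 * device id inside the command buffer is recorded in 32-bit units; after
 * validation, when the final ids are known, every recorded location is
 * patched in one pass:
 */
#if 0
	/* During parsing: remember where res's id must be written. */
	vmw_resource_relocation_add(&sw_context->res_relocations, res,
				    id_loc - sw_context->buf_start);

	/* After validation: patch all recorded locations, then clean up. */
	vmw_resource_relocations_apply(sw_context->buf_start,
				       &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
#endif
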
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return 0; //capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
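
/*
 * Editor's note (an assumption about the surrounding execbuf path, not
 * shown in this listing): reserve/validate/unreserve form the
 * per-submission resource lifecycle. A rough sketch of the order they are
 * used in, assuming buffer-object validation happens between the two
 * resource phases:
 *
 *	vmw_resources_reserve(sw_context);
 *	ttm_eu_reserve_buffers(...);	// reserve + validate backup buffers
 *	vmw_resources_validate(sw_context);
 *	// ...command submission...
 *	vmw_resources_unreserve(sw_context, failed);
 */
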
/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
//		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}
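
/*
 * Illustrative sketch (editor's assumption, not part of this listing): a
 * typical caller is a "set shader resources" style command handler, which
 * hands the view id array that trails the command header straight to
 * vmw_view_bindings_add(). The command layout and field names below are
 * hypothetical, shown only to illustrate the calling convention:
 */
#if 0
	uint32 *ids = (uint32 *) &cmd[1];
	u32 num_ids = (header->size - sizeof(cmd->body)) / sizeof(*ids);

	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
				    vmw_ctx_binding_sr,
				    cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				    ids, num_ids, cmd->body.startView);
#endif
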
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
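
/*
 * Illustrative sketch (editor's assumption, not part of this listing): a
 * legacy (non guest-backed) query command handler would use the helper
 * above roughly like this; the command layout shown is hypothetical:
 */
#if 0
	struct vmw_dma_buffer *vmw_bo;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;
	/* ...use the buffer, then drop the reference taken above. */
	vmw_dmabuf_unreference(&vmw_bo);
#endif
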
/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;

	int ret;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;

	struct vmw_dma_buffer *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}


/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);
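
	/*
	 * On guest-backed devices the legacy query command is rewritten
	 * in place into its guest-backed equivalent. The BUG_ON documents
	 * the assumption that makes this safe: both commands have exactly
	 * the same size, so the in-place memcpy cannot clobber a
	 * neighbouring command.
	 */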
	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;
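
	/*
	 * Ending a query makes the device write results into this buffer.
	 * vmw_query_bo_switch_prepare() tracks it as the batch's current
	 * query result buffer, so pending queries can be flushed when user
	 * space switches to a different buffer.
	 */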
	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}
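
	/*
	 * The suffix's maximumOffset is clamped, rather than rejected, to
	 * the space remaining in the buffer object, so the device can never
	 * be asked to transfer past the end of the BO.
	 */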
	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

//	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
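
	/*
	 * The command body is followed by numVertexDecls SVGA3dVertexDecl
	 * entries and then by numRanges SVGA3dPrimitiveRange entries. Both
	 * counts come from user space, so each is checked against what
	 * actually fits in the command before the arrays are walked.
	 */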
	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;
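
		/*
		 * Stage the texture binding on the context so the binding
		 * tracker can rebind or unbind the texture when either the
		 * context or the texture resource moves.
		 */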
		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;
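
	/*
	 * Record that the command stream supplies a new backup buffer. If
	 * this is also the resource's first usage in the batch, validation
	 * does not need to provide a backup buffer of its own.
	 */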
	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;
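
	/*
	 * The shader body has been absorbed into a kernel-managed compat
	 * shader above. Adding a relocation with a NULL resource patches
	 * the command id to a NOP at fixup time, so the device never sees
	 * the legacy define.
	 */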
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	/* As above, NOP out the command so the device never sees it. */
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;
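
	/*
	 * A legacy shader id may refer to a shader that was emulated with
	 * a kernel-managed compat shader; try that lookup first and only
	 * fall back to a user-space shader resource if no compat shader
	 * exists.
	 */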
	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;
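
	/*
	 * On guest-backed devices only the command id needs to change;
	 * the command body is reused as-is by the guest-backed variant.
	 */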
	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);
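
	/*
	 * startView and num_sr_view are both untrusted 32-bit values;
	 * summing them in 64 bits keeps an integer overflow from
	 * defeating the range check.
	 */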
	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;
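
	/*
	 * vmw_view_add() is handed the raw define command and its size so
	 * the view code can keep a copy, allowing it to replay the define
	 * if the view ever has to be recreated on the device.
	 */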
	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;
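
	/*
	 * All three subresource commands start with the surface id, so a
	 * single check on the union member suffices; the BUILD_BUG_ONs
	 * verify that the overlay actually holds.
	 */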
	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, make sure it's validated (present in the device) so that
 * the remove command will not confuse the device.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * Add view to the validate list iff it was not created using this
	 * command batch.
	 */
	return vmw_view_res_val_add(sw_context, view);
}
2884 | |||
2885 | /** |
||
2886 | * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER |
||
2887 | * command |
||
2888 | * |
||
2889 | * @dev_priv: Pointer to a device private struct. |
||
2890 | * @sw_context: The software context being used for this batch. |
||
2891 | * @header: Pointer to the command header in the command stream. |
||
2892 | */ |
||
2893 | static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, |
||
2894 | struct vmw_sw_context *sw_context, |
||
2895 | SVGA3dCmdHeader *header) |
||
2896 | { |
||
2897 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; |
||
2898 | struct vmw_resource *res; |
||
2899 | struct { |
||
2900 | SVGA3dCmdHeader header; |
||
2901 | SVGA3dCmdDXDefineShader body; |
||
2902 | } *cmd = container_of(header, typeof(*cmd), header); |
||
2903 | int ret; |
||
2904 | |||
2905 | if (!ctx_node) { |
||
2906 | DRM_ERROR("DX Context not set.\n"); |
||
2907 | return -EINVAL; |
||
2908 | } |
||
2909 | |||
2910 | res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER); |
||
2911 | ret = vmw_cotable_notify(res, cmd->body.shaderId); |
||
2912 | vmw_resource_unreference(&res); |
||
2913 | if (ret) |
||
2914 | return ret; |
||
2915 | |||
2916 | return vmw_dx_shader_add(sw_context->man, ctx_node->res, |
||
2917 | cmd->body.shaderId, cmd->body.type, |
||
2918 | &sw_context->staged_cmd_res); |
||
2919 | } |
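
/*
 * Defining a DX shader consumes a slot in the context's shader cotable:
 * vmw_cotable_notify() is handed cmd->body.shaderId so the cotable can be
 * grown (and revalidated) if that id does not fit yet. The same
 * vmw_context_cotable() + vmw_cotable_notify() pair is the usual prelude
 * for any DX define command that takes up a cotable entry.
 */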

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}
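
/*
 * Binding a shader only switches its backup buffer: the shader contents
 * live in a mob (memory object), and vmw_cmd_res_switch_backup() records
 * the location of cmd->body.mobid as a relocation, so the real mob id can
 * be patched into the stream once the buffer has been validated and
 * placed (see the VMW_PL_MOB case in vmw_apply_relocations() below).
 */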

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
			  cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
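
/*
 * Unlike 3D commands, the legacy 2D commands checked above carry no size
 * field in the stream; the size is implied by the command id. For
 * example, an SVGA_CMD_UPDATE packet is just the 32-bit id followed by a
 * fixed SVGAFifoCmdUpdate body, roughly:
 *
 *	uint32_t cmd[] = { SVGA_CMD_UPDATE, x, y, width, height };
 *
 * which is why *size is computed from sizeof() rather than read from a
 * header.
 */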

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
//		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
//	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
//		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true)
};
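
/*
 * The three booleans in each VMW_CMD_DEF() entry are, in the order
 * vmw_cmd_check() below reads them, user_allow, gb_disable and gb_enable:
 * whether user-space may issue the command at all, whether the command is
 * disallowed once the device has guest-backed objects, and whether it
 * requires guest-backed objects. For example
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *		    true, false, false)
 *
 * is a user-allowed command that is valid with or without GB support.
 */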

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;

out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
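
/*
 * Dispatch works by table index: ids below SVGA_CMD_MAX are 2D commands,
 * everything else is rebased by SVGA_3D_CMD_BASE into vmw_cmd_entries[].
 * As a rough example (the exact numbers depend on the SVGA headers in
 * use): with SVGA_3D_CMD_BASE == 1040, an SVGA_3D_CMD_SURFACE_DMA packet
 * with id 1044 selects vmw_cmd_entries[4], i.e. the vmw_cmd_dma entry
 * above, and *size becomes header->size plus the 8 bytes of the id/size
 * pair.
 */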

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
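
/*
 * A relocation records where in the (already copied) command stream a
 * guest address lives. Only after the buffer objects have been validated
 * into their final placement can the blanks be filled in: a VRAM buffer
 * becomes the SVGA_GMR_FRAMEBUFFER "gmr" plus a byte offset, a real GMR
 * contributes its gmr id, and a mob simply gets its mob id written back.
 */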

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context holding the shared staged bindings.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
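
/*
 * The bounce buffer grows by roughly 1.5x per step, page-aligned,
 * starting from VMWGFX_CMD_BOUNCE_INIT_SIZE. As an illustration only
 * (assuming 4 KiB pages and a 32 KiB initial size), a 100 KiB batch
 * would grow the buffer 32 -> 48 -> 72 -> 108 KiB before the vmalloc;
 * whatever the constants, the invariant on success is
 * cmd_bounce_size >= size.
 */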

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}
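
/*
 * Two submission back ends share the same patch-then-commit sequence:
 * vmw_execbuf_submit_fifo() copies the batch into reserved fifo space,
 * while vmw_execbuf_submit_cmdbuf() patches the batch in place inside a
 * buffer previously set up by vmw_execbuf_cmdbuf() below, so no extra
 * copy is needed. Which one runs is decided in vmw_execbuf_process() by
 * whether a cmdbuf header exists.
 */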

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel-space copy of the command batch,
 * or NULL if the batch still needs to be copied from @user_commands.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS casted to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}
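
/*
 * The error unwind above mirrors the setup order: out_unlock_binding
 * drops only the binding mutex, out_err backs off the ttm reservations,
 * out_err_nores additionally unreserves resources and frees relocations,
 * and out_unlock reverts staged command-buffer resources before the
 * cmdbuf_mutex is released. Resource unreferencing is deliberately done
 * after that mutex is dropped, on both the success and failure paths.
 */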

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg.version.
	 */

	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

//	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}