/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

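/*
 * Illustrative sketch (not part of this file): VMW_CMD_DEF produces
 * designated initializers indexed by SVGA3d command id, so the verifier
 * can dispatch a parsed command header with a single table lookup. The
 * excerpt below is a hypothetical fragment; the real vmw_cmd_entries[]
 * table appears later in vmwgfx_execbuf.c.
 */
#if 0
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
};

/* Dispatch: look up the entry for a command header and call its verifier. */
static int vmw_cmd_dispatch_sketch(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	const struct vmw_cmd_entry *entry =
		&vmw_cmd_entries[header->id - SVGA_3D_CMD_BASE];

	return entry->func(dev_priv, sw_context, header);
}
#endif
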
/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			}
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_binding *entry;
	int ret = 0;
	struct vmw_resource *res;

	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		res = vmw_resource_reference_unless_doomed(entry->bi.res);
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

261 | |||
262 | /** |
||
263 | * vmw_resource_relocations_apply - Apply all relocations on a list |
||
264 | * |
||
265 | * @cb: Pointer to the start of the command buffer bein patch. This need |
||
266 | * not be the same buffer as the one being parsed when the relocation |
||
267 | * list was built, but the contents must be the same modulo the |
||
268 | * resource ids. |
||
269 | * @list: Pointer to the head of the relocation list. |
||
270 | */ |
||
271 | static void vmw_resource_relocations_apply(uint32_t *cb, |
||
272 | struct list_head *list) |
||
273 | { |
||
274 | struct vmw_resource_relocation *rel; |
||
275 | |||
5078 | serge | 276 | list_for_each_entry(rel, list, head) { |
277 | if (likely(rel->res != NULL)) |
||
4075 | Serge | 278 | cb[rel->offset] = rel->res->id; |
5078 | serge | 279 | else |
280 | cb[rel->offset] = SVGA_3D_CMD_NOP; |
||
281 | } |
||
4075 | Serge | 282 | } |
283 | |||
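/*
 * Illustrative sketch (not part of this file): how the relocation pieces
 * fit together. While parsing, each command verifier records the offset of
 * a resource id via vmw_resource_relocation_add(); once all resources have
 * been reserved and hold valid device ids, the (possibly copied) command
 * buffer is patched in a single pass. The function name below is a
 * hypothetical stand-in for the tail of the execbuf path.
 */
#if 0
static void vmw_patch_command_stream_sketch(struct vmw_sw_context *sw_context,
					    uint32_t *kernel_commands)
{
	/* Fix up every recorded id location with the now-valid device id. */
	vmw_resource_relocations_apply(kernel_commands,
				       &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
}
#endif
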
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return 0; //capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

297 | |||
298 | /** |
||
299 | * vmw_bo_to_validate_list - add a bo to a validate list |
||
300 | * |
||
301 | * @sw_context: The software context used for this command submission batch. |
||
302 | * @bo: The buffer object to add. |
||
4569 | Serge | 303 | * @validate_as_mob: Validate this buffer as a MOB. |
4075 | Serge | 304 | * @p_val_node: If non-NULL Will be updated with the validate node number |
305 | * on return. |
||
306 | * |
||
307 | * Returns -EINVAL if the limit of number of buffer objects per command |
||
308 | * submission is reached. |
||
309 | */ |
||
310 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, |
||
311 | struct ttm_buffer_object *bo, |
||
4569 | Serge | 312 | bool validate_as_mob, |
4075 | Serge | 313 | uint32_t *p_val_node) |
314 | { |
||
315 | uint32_t val_node; |
||
316 | struct vmw_validate_buffer *vval_buf; |
||
317 | struct ttm_validate_buffer *val_buf; |
||
318 | struct drm_hash_item *hash; |
||
319 | int ret; |
||
320 | |||
321 | if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, |
||
322 | &hash) == 0)) { |
||
323 | vval_buf = container_of(hash, struct vmw_validate_buffer, |
||
324 | hash); |
||
4569 | Serge | 325 | if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { |
326 | DRM_ERROR("Inconsistent buffer usage.\n"); |
||
327 | return -EINVAL; |
||
328 | } |
||
4075 | Serge | 329 | val_buf = &vval_buf->base; |
330 | val_node = vval_buf - sw_context->val_bufs; |
||
331 | } else { |
||
332 | val_node = sw_context->cur_val_buf; |
||
333 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { |
||
334 | DRM_ERROR("Max number of DMA buffers per submission " |
||
335 | "exceeded.\n"); |
||
336 | return -EINVAL; |
||
337 | } |
||
338 | vval_buf = &sw_context->val_bufs[val_node]; |
||
339 | vval_buf->hash.key = (unsigned long) bo; |
||
340 | ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); |
||
341 | if (unlikely(ret != 0)) { |
||
342 | DRM_ERROR("Failed to initialize a buffer validation " |
||
343 | "entry.\n"); |
||
344 | return ret; |
||
345 | } |
||
346 | ++sw_context->cur_val_buf; |
||
347 | val_buf = &vval_buf->base; |
||
348 | val_buf->bo = ttm_bo_reference(bo); |
||
349 | val_buf->reserved = false; |
||
350 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
||
4569 | Serge | 351 | vval_buf->validate_as_mob = validate_as_mob; |
4075 | Serge | 352 | } |
353 | |||
354 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
||
355 | |||
356 | if (p_val_node) |
||
357 | *p_val_node = val_node; |
||
358 | |||
359 | return 0; |
||
360 | } |
||
361 | |||
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

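/*
 * Illustrative sketch (not part of this file): the order in which the two
 * helpers above are used during command submission. Backup buffers must be
 * placed between reserving and validating the resources, since
 * vmw_resources_validate() expects valid backup placements.
 * vmw_validate_buffers() is assumed here as a stand-in for the
 * buffer-validation step of the execbuf path.
 */
#if 0
static int vmw_validate_sketch(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_resources_reserve(sw_context);	/* reserve + queue BOs */
	if (ret)
		return ret;
	ret = vmw_validate_buffers(dev_priv, sw_context); /* place backup BOs */
	if (ret)
		return ret;
	return vmw_resources_validate(sw_context);	/* validate resources */
}
#endif
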
/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_err;

	if (res_type == vmw_res_context && dev_priv->has_mob &&
	    node->first_usage) {

		/*
		 * Put contexts first on the list to be able to exit
		 * list traversal for contexts early.
		 */
		list_del(&node->head);
		list_add(&node->head, &sw_context->resource_list);

		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
		if (unlikely(ret != 0))
			goto out_err;
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = -ENOMEM;
			goto out_err;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	if (p_val)
		*p_val = node;

out_err:
	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_context_rebind_all(val->res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;


	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

||
745 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. |
||
746 | * |
||
747 | * @dev_priv: The device private structure. |
||
748 | * @new_query_bo: The new buffer holding query results. |
||
749 | * @sw_context: The software context used for this command submission. |
||
750 | * |
||
751 | * This function checks whether @new_query_bo is suitable for holding |
||
752 | * query results, and if another buffer currently is pinned for query |
||
753 | * results. If so, the function prepares the state of @sw_context for |
||
754 | * switching pinned buffers after successful submission of the current |
||
755 | * command batch. |
||
756 | */ |
||
757 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, |
||
758 | struct ttm_buffer_object *new_query_bo, |
||
759 | struct vmw_sw_context *sw_context) |
||
760 | { |
||
761 | struct vmw_res_cache_entry *ctx_entry = |
||
762 | &sw_context->res_cache[vmw_res_context]; |
||
763 | int ret; |
||
764 | |||
765 | BUG_ON(!ctx_entry->valid); |
||
766 | sw_context->last_query_ctx = ctx_entry->res; |
||
767 | |||
768 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { |
||
769 | |||
770 | if (unlikely(new_query_bo->num_pages > 4)) { |
||
771 | DRM_ERROR("Query buffer too large.\n"); |
||
772 | return -EINVAL; |
||
773 | } |
||
774 | |||
775 | if (unlikely(sw_context->cur_query_bo != NULL)) { |
||
776 | sw_context->needs_post_query_barrier = true; |
||
777 | ret = vmw_bo_to_validate_list(sw_context, |
||
778 | sw_context->cur_query_bo, |
||
4569 | Serge | 779 | dev_priv->has_mob, NULL); |
4075 | Serge | 780 | if (unlikely(ret != 0)) |
781 | return ret; |
||
782 | } |
||
783 | sw_context->cur_query_bo = new_query_bo; |
||
784 | |||
785 | ret = vmw_bo_to_validate_list(sw_context, |
||
786 | dev_priv->dummy_query_bo, |
||
4569 | Serge | 787 | dev_priv->has_mob, NULL); |
4075 | Serge | 788 | if (unlikely(ret != 0)) |
789 | return ret; |
||
790 | |||
791 | } |
||
792 | |||
793 | return 0; |
||
794 | } |
||
795 | |||
796 | |||
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

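/*
 * Illustrative sketch (not part of this file): the ordering contract
 * described above. vmw_query_bo_switch_commit() must be followed by
 * emitting a fence, so that both the old and the new query buffer are
 * fenced with a sequence emitted *after* the commit. The helper below is
 * a hypothetical stand-in for the tail of the execbuf path;
 * vmw_execbuf_fence_commands() is the driver's fence-emission helper.
 */
#if 0
static void vmw_query_switch_usage_sketch(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context)
{
	struct vmw_fence_obj *fence = NULL;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	/* The fence emitted here covers both query buffers. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
}
#endif
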
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		ret = -EINVAL;
		goto out_no_surface;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

//	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}


static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

#if 0
/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo bi;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_compat_shader_lookup
			(vmw_context_res_man(ctx_node->res),
			 cmd->body.shid,
			 cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    vmw_res_shader,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	bi.ctx = ctx_node->res;
	bi.res = res_node ? res_node->res : NULL;
	bi.bt = vmw_ctx_binding_shader;
	bi.i1.shader_type = cmd->body.type;
	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
#endif

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

#if 0
/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}
#endif

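/**
 * vmw_cmd_check_not_3d - Validate a command from the 2D (non-3D) part of
 * the SVGA command range
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the stream. Out: size of this command.
 *
 * Only a small set of fifo commands is accepted, all of them restricted
 * to kernel submissions. SVGA_CMD_DEFINE_GMRFB additionally has its
 * guest pointer validated.
 */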
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
			  cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

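/*
 * Dispatch table for the SVGA 3D command range, indexed by command id
 * minus SVGA_3D_CMD_BASE. The three booleans in each entry select whether
 * user-space may issue the command, whether it is disallowed when
 * guest-backed objects are available, and whether it requires
 * guest-backed objects, respectively.
 */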
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
//		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
//	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
//		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true)
};

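/*
 * vmw_cmd_check - Verify a single command and dispatch it to the
 * command-specific validator from the table above. On success, *size is
 * set to the full size of the command so that the caller can advance to
 * the next command header.
 */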
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;

out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

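/*
 * vmw_cmd_check_all - Walk the entire command stream, validating each
 * command in turn. vmw_cmd_check() patches the per-command size, which
 * is used here to step to the next command header.
 */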
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

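/*
 * The relocation array lives in the software context and is reused
 * between batches, so "freeing" relocations only needs to reset the
 * allocation counter.
 */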
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

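/*
 * vmw_apply_relocations - Patch guest pointers in the command stream with
 * the final placement of each validated buffer: a VRAM offset relative to
 * SVGA_GMR_FRAMEBUFFER, a GMR id, or a MOB id, depending on where the
 * buffer ended up.
 */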
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		if (unlikely(val->staged_bindings))
			kfree(val->staged_bindings);
		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

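/**
 * vmw_validate_single_buffer - Place a single buffer for command submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to place.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Pinned buffers (the pinned query bo and the pinned dummy query bo) are
 * left where they are; everything else is placed according to the
 * strategy described in the body below.
 */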
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool validate_as_mob)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */

	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

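/*
 * vmw_resize_cmd_bounce - Make sure the bounce buffer used for copying
 * command streams is large enough. The buffer grows by roughly 1.5x,
 * page-aligned, each iteration until it covers @size; growing a 32 KiB
 * buffer to hold 40 KiB, for example, takes a single step to 48 KiB.
 * The old contents are discarded, so this must be called before the
 * buffer is filled.
 */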
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 *
	 * Note: the copy itself is compiled out in this port, which leaves
	 * @ret at its entry value and makes the recovery path below
	 * unreachable here.
	 */
//	ret = copy_to_user(user_fence_rep, &fence_rep,
//			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

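/**
 * vmw_execbuf_process - Validate and submit a command stream
 *
 * @file_priv: Pointer to the calling file, or NULL for kernel submissions.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the commands (unused in this port).
 * @kernel_commands: Kernel pointer to the commands.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle the submission against fifo lag.
 * @user_fence_rep: Optional user-space address to receive fence info.
 * @out_fence: Optional location to hand out the created fence object.
 *
 * Checks all commands, reserves and validates the resources and buffers
 * they reference, copies the stream into reserved fifo space, applies
 * relocations, commits, and finally fences the submission.
 */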
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	/*
	 * The user-space bounce-copy path is compiled out in this port;
	 * all submissions are treated as kernel command buffers.
	 *
	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else */
	sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock_binding;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	mutex_unlock(&dev_priv->binding_mutex);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}


/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}


int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

//	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}