--- Rev 5078
+++ Rev 6296
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_reg.h"
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_HT_ORDER 12
 
 /**
  * struct vmw_resource_relocation - Relocation info for resources
  *
  * @head: List head for the software context's relocation list.
  * @res: Non-ref-counted pointer to the resource.
  * @offset: Offset of 4 byte entries into the command buffer where the
  * id that needs fixup is located.
  */
 struct vmw_resource_relocation {
         struct list_head head;
         const struct vmw_resource *res;
         unsigned long offset;
 };
 
 /**
  * struct vmw_resource_val_node - Validation info for resources
  *
  * @head: List head for the software context's resource list.
  * @hash: Hash entry for quick resource to val_node lookup.
  * @res: Ref-counted pointer to the resource.
  * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  * @new_backup: Refcounted pointer to the new backup buffer.
  * @staged_bindings: If @res is a context, tracks bindings set up during
  * the command batch. Otherwise NULL.
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on first buffer
+ * reference. So resource reservation does not need to allocate a backup
+ * buffer for the resource.
  */
 struct vmw_resource_val_node {
         struct list_head head;
         struct drm_hash_item hash;
         struct vmw_resource *res;
         struct vmw_dma_buffer *new_backup;
         struct vmw_ctx_binding_state *staged_bindings;
         unsigned long new_backup_offset;
-        bool first_usage;
-        bool no_buffer_needed;
+        u32 first_usage : 1;
+        u32 switching_backup : 1;
+        u32 no_buffer_needed : 1;
 };
 
 /**
  * struct vmw_cmd_entry - Describe a command for the verifier
  *
  * @user_allow: Whether allowed from the execbuf ioctl.
  * @gb_disable: Whether disabled if guest-backed objects are available.
  * @gb_enable: Whether enabled iff guest-backed objects are available.
  */
 struct vmw_cmd_entry {
         int (*func) (struct vmw_private *, struct vmw_sw_context *,
                      SVGA3dCmdHeader *);
         bool user_allow;
         bool gb_disable;
         bool gb_enable;
 };
 
 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                        (_gb_disable), (_gb_enable)}
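
The table built with VMW_CMD_DEF lies outside this hunk, so a minimal sketch of the usual consumption pattern may help. The table name and the single entry below are illustrative assumptions, not lines from either revision:

```c
/* Illustrative sketch only -- not code from Rev 5078 or Rev 6296. */
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
                    true, false, false),
};
```

The designated initializer indexes the table by command id relative to SVGA_3D_CMD_BASE, so a dispatcher can find the verifier callback for a command header in O(1).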
+
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+                                        struct vmw_sw_context *sw_context,
+                                        struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+                                 struct vmw_sw_context *sw_context,
+                                 SVGAMobId *id,
+                                 struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+                                   struct vmw_dma_buffer *vbo,
+                                   bool validate_as_mob,
+                                   uint32_t *p_val_node);
+
 
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
  * command submission.
  *
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct list_head *list,
-                                        bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+                                    bool backoff)
 {
         struct vmw_resource_val_node *val;
+        struct list_head *list = &sw_context->resource_list;
+
+        if (sw_context->dx_query_mob && !backoff)
+                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+                                          sw_context->dx_query_mob);
 
         list_for_each_entry(val, list, head) {
                 struct vmw_resource *res = val->res;
-                struct vmw_dma_buffer *new_backup =
-                        backoff ? NULL : val->new_backup;
+                bool switch_backup =
+                        (backoff) ? false : val->switching_backup;
 
                 /*
                  * Transfer staged context bindings to the
                  * persistent context binding tracker.
                  */
                 if (unlikely(val->staged_bindings)) {
                         if (!backoff) {
-                                vmw_context_binding_state_transfer
-                                        (val->res, val->staged_bindings);
+                                vmw_binding_state_commit
+                                        (vmw_context_binding_state(val->res),
+                                         val->staged_bindings);
                         }
-                        kfree(val->staged_bindings);
+
+                        if (val->staged_bindings != sw_context->staged_bindings)
+                                vmw_binding_state_free(val->staged_bindings);
+                        else
+                                sw_context->staged_bindings_inuse = false;
                         val->staged_bindings = NULL;
                 }
-                vmw_resource_unreserve(res, new_backup,
+                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                        val->new_backup_offset);
                 vmw_dmabuf_unreference(&val->new_backup);
         }
 }
+
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+                                   struct vmw_sw_context *sw_context,
+                                   struct vmw_resource_val_node *node)
+{
+        int ret;
+
+        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+        if (unlikely(ret != 0))
+                goto out_err;
+
+        if (!sw_context->staged_bindings) {
+                sw_context->staged_bindings =
+                        vmw_binding_state_alloc(dev_priv);
+                if (IS_ERR(sw_context->staged_bindings)) {
+                        DRM_ERROR("Failed to allocate context binding "
+                                  "information.\n");
+                        ret = PTR_ERR(sw_context->staged_bindings);
+                        sw_context->staged_bindings = NULL;
+                        goto out_err;
+                }
+        }
+
+        if (sw_context->staged_bindings_inuse) {
+                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+                if (IS_ERR(node->staged_bindings)) {
+                        DRM_ERROR("Failed to allocate context binding "
+                                  "information.\n");
+                        ret = PTR_ERR(node->staged_bindings);
+                        node->staged_bindings = NULL;
+                        goto out_err;
+                }
+        } else {
+                node->staged_bindings = sw_context->staged_bindings;
+                sw_context->staged_bindings_inuse = true;
+        }
+
+        return 0;
+out_err:
+        return ret;
+}
 
 /**
  * vmw_resource_val_add - Add a resource to the software context's
  * resource list if it's not already on it.
  *
  * @sw_context: Pointer to the software context.
  * @res: Pointer to the resource.
  * @p_node: On successful return points to a valid pointer to a
  * struct vmw_resource_val_node, if non-NULL on entry.
  */
 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_node)
 {
+        struct vmw_private *dev_priv = res->dev_priv;
         struct vmw_resource_val_node *node;
         struct drm_hash_item *hash;
         int ret;
 
         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                    &hash) == 0)) {
                 node = container_of(hash, struct vmw_resource_val_node, hash);
                 node->first_usage = false;
                 if (unlikely(p_node != NULL))
                         *p_node = node;
                 return 0;
         }
 
         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (unlikely(node == NULL)) {
                 DRM_ERROR("Failed to allocate a resource validation "
                           "entry.\n");
                 return -ENOMEM;
         }
 
         node->hash.key = (unsigned long) res;
         ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Failed to initialize a resource validation "
                           "entry.\n");
                 kfree(node);
                 return ret;
         }
-        list_add_tail(&node->head, &sw_context->resource_list);
         node->res = vmw_resource_reference(res);
         node->first_usage = true;
         if (unlikely(p_node != NULL))
                 *p_node = node;
 
-        return 0;
-}
+        if (!dev_priv->has_mob) {
+                list_add_tail(&node->head, &sw_context->resource_list);
+                return 0;
+        }
+
+        switch (vmw_res_type(res)) {
+        case vmw_res_context:
+        case vmw_res_dx_context:
+                list_add(&node->head, &sw_context->ctx_resource_list);
+                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+                break;
+        case vmw_res_cotable:
+                list_add_tail(&node->head, &sw_context->ctx_resource_list);
+                break;
+        default:
+                list_add_tail(&node->head, &sw_context->resource_list);
+                break;
+        }
+
+        return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to
+ * to the validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+                                struct vmw_resource *view)
+{
+        int ret;
+
+        /*
+         * First add the resource the view is pointing to, otherwise
+         * it may be swapped out when the view is validated.
+         */
+        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+        if (ret)
+                return ret;
+
+        return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's
+ * pointing to to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+                               enum vmw_view_type view_type, u32 id)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_resource *view;
+        int ret;
+
+        if (!ctx_node) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        view = vmw_view_lookup(sw_context->man, view_type, id);
+        if (IS_ERR(view))
+                return PTR_ERR(view);
+
+        ret = vmw_view_res_val_add(sw_context, view);
+        vmw_resource_unreference(&view);
+
+        return ret;
+}
 
 /**
  * vmw_resource_context_res_add - Put resources previously bound to a context on
  * the validation list
  *
  * @dev_priv: Pointer to a device private structure
  * @sw_context: Pointer to a software context used for this command submission
  * @ctx: Pointer to the context resource
  *
  * This function puts all resources that were previously bound to @ctx on
  * the resource validation list. This is part of the context state reemission.
  */
 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         struct vmw_resource *ctx)
 {
         struct list_head *binding_list;
-        struct vmw_ctx_binding *entry;
+        struct vmw_ctx_bindinfo *entry;
         int ret = 0;
         struct vmw_resource *res;
+        u32 i;
+
+        /* Add all cotables to the validation list. */
+        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+                        res = vmw_context_cotable(ctx, i);
+                        if (IS_ERR(res))
+                                continue;
+
+                        ret = vmw_resource_val_add(sw_context, res, NULL);
+                        vmw_resource_unreference(&res);
+                        if (unlikely(ret != 0))
+                                return ret;
+                }
+        }
+
+        /* Add all resources bound to the context to the validation list */
         mutex_lock(&dev_priv->binding_mutex);
         binding_list = vmw_context_binding_list(ctx);
 
         list_for_each_entry(entry, binding_list, ctx_list) {
-                res = vmw_resource_reference_unless_doomed(entry->bi.res);
+                /* entry->res is not refcounted */
+                res = vmw_resource_reference_unless_doomed(entry->res);
                 if (unlikely(res == NULL))
                         continue;
-                ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+
+                if (vmw_res_type(entry->res) == vmw_res_view)
+                        ret = vmw_view_res_val_add(sw_context, entry->res);
+                else
+                        ret = vmw_resource_val_add(sw_context, entry->res,
+                                                   NULL);
                 vmw_resource_unreference(&res);
                 if (unlikely(ret != 0))
                         break;
         }
+
+        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+                struct vmw_dma_buffer *dx_query_mob;
+
+                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+                if (dx_query_mob)
+                        ret = vmw_bo_to_validate_list(sw_context,
+                                                      dx_query_mob,
+                                                      true, NULL);
+        }
 
         mutex_unlock(&dev_priv->binding_mutex);
         return ret;
 }
 
 /**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
  * @list: Pointer to head of relocation list.
  * @res: The resource.
  * @offset: Offset into the command buffer currently being parsed where the
  * id that needs fixup is located. Granularity is 4 bytes.
  */
 static int vmw_resource_relocation_add(struct list_head *list,
                                        const struct vmw_resource *res,
                                        unsigned long offset)
 {
         struct vmw_resource_relocation *rel;
 
         rel = kmalloc(sizeof(*rel), GFP_KERNEL);
         if (unlikely(rel == NULL)) {
                 DRM_ERROR("Failed to allocate a resource relocation.\n");
                 return -ENOMEM;
         }
 
         rel->res = res;
         rel->offset = offset;
         list_add_tail(&rel->head, list);
 
         return 0;
 }
 
 /**
  * vmw_resource_relocations_free - Free all relocations on a list
  *
  * @list: Pointer to the head of the relocation list.
  */
 static void vmw_resource_relocations_free(struct list_head *list)
 {
         struct vmw_resource_relocation *rel, *n;
 
         list_for_each_entry_safe(rel, n, list, head) {
                 list_del(&rel->head);
                 kfree(rel);
         }
 }
 
 /**
  * vmw_resource_relocations_apply - Apply all relocations on a list
  *
  * @cb: Pointer to the start of the command buffer being patched. This need
  * not be the same buffer as the one being parsed when the relocation
  * list was built, but the contents must be the same modulo the
  * resource ids.
  * @list: Pointer to the head of the relocation list.
  */
 static void vmw_resource_relocations_apply(uint32_t *cb,
                                            struct list_head *list)
 {
         struct vmw_resource_relocation *rel;
 
         list_for_each_entry(rel, list, head) {
                 if (likely(rel->res != NULL))
                         cb[rel->offset] = rel->res->id;
                 else
                         cb[rel->offset] = SVGA_3D_CMD_NOP;
         }
 }
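
Taken together, these three helpers implement a record-then-patch scheme: id locations are remembered while the command stream is parsed and written back once every resource holds a valid device id. A rough sketch of the assumed calling sequence (the locals here are hypothetical; only the helper names and signatures come from this file):

```c
/* Hypothetical caller flow -- a sketch, not code from either revision. */
struct list_head relocs;
INIT_LIST_HEAD(&relocs);

/* While parsing: remember where the id lives, in 4-byte units. */
ret = vmw_resource_relocation_add(&relocs, res,
                                  id_loc - sw_context->buf_start);

/* After validation, when device ids are final: patch and clean up. */
vmw_resource_relocations_apply(cmd_start, &relocs);
vmw_resource_relocations_free(&relocs);
```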
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                            struct vmw_sw_context *sw_context,
                            SVGA3dCmdHeader *header)
 {
         return 0; //capable(CAP_SYS_ADMIN) ? : -EINVAL;
 }
 
 static int vmw_cmd_ok(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
 {
         return 0;
 }
 
 /**
  * vmw_bo_to_validate_list - add a bo to a validate list
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
  * @validate_as_mob: Validate this buffer as a MOB.
  * @p_val_node: If non-NULL, will be updated with the validate node number
  * on return.
  *
  * Returns -EINVAL if the limit of number of buffer objects per command
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                   struct ttm_buffer_object *bo,
+                                   struct vmw_dma_buffer *vbo,
                                    bool validate_as_mob,
                                    uint32_t *p_val_node)
 {
         uint32_t val_node;
         struct vmw_validate_buffer *vval_buf;
         struct ttm_validate_buffer *val_buf;
         struct drm_hash_item *hash;
         int ret;
 
-        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                    &hash) == 0)) {
                 vval_buf = container_of(hash, struct vmw_validate_buffer,
                                         hash);
                 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                         DRM_ERROR("Inconsistent buffer usage.\n");
                         return -EINVAL;
                 }
                 val_buf = &vval_buf->base;
                 val_node = vval_buf - sw_context->val_bufs;
         } else {
                 val_node = sw_context->cur_val_buf;
                 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                         DRM_ERROR("Max number of DMA buffers per submission "
                                   "exceeded.\n");
                         return -EINVAL;
                 }
                 vval_buf = &sw_context->val_bufs[val_node];
-                vval_buf->hash.key = (unsigned long) bo;
+                vval_buf->hash.key = (unsigned long) vbo;
                 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                 if (unlikely(ret != 0)) {
                         DRM_ERROR("Failed to initialize a buffer validation "
                                   "entry.\n");
                         return ret;
                 }
                 ++sw_context->cur_val_buf;
                 val_buf = &vval_buf->base;
-                val_buf->bo = ttm_bo_reference(bo);
-                val_buf->reserved = false;
+                val_buf->bo = ttm_bo_reference(&vbo->base);
+                val_buf->shared = false;
                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                 vval_buf->validate_as_mob = validate_as_mob;
         }
-
-        sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 
         if (p_val_node)
                 *p_val_node = val_node;
 
         return 0;
 }
 
 /**
  * vmw_resources_reserve - Reserve all resources on the sw_context's
  * resource list.
  *
  * @sw_context: Pointer to the software context.
  *
  * Note that since vmware's command submission currently is protected by
  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
  * since only a single thread at once will attempt this.
  */
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
         struct vmw_resource_val_node *val;
-        int ret;
+        int ret = 0;
 
         list_for_each_entry(val, &sw_context->resource_list, head) {
                 struct vmw_resource *res = val->res;
 
-                ret = vmw_resource_reserve(res, val->no_buffer_needed);
+                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                 if (unlikely(ret != 0))
                         return ret;
 
                 if (res->backup) {
-                        struct ttm_buffer_object *bo = &res->backup->base;
+                        struct vmw_dma_buffer *vbo = res->backup;
 
                         ret = vmw_bo_to_validate_list
-                                (sw_context, bo,
+                                (sw_context, vbo,
                                  vmw_resource_needs_backup(res), NULL);
 
                         if (unlikely(ret != 0))
                                 return ret;
                 }
         }
 
-        return 0;
+        if (sw_context->dx_query_mob) {
+                struct vmw_dma_buffer *expected_dx_query_mob;
+
+                expected_dx_query_mob =
+                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+                if (expected_dx_query_mob &&
+                    expected_dx_query_mob != sw_context->dx_query_mob) {
+                        ret = -EINVAL;
+                }
+        }
+
+        return ret;
 }
 
 /**
  * vmw_resources_validate - Validate all resources on the sw_context's
  * resource list.
  *
  * @sw_context: Pointer to the software context.
  *
  * Before this function is called, all resource backup buffers must have
  * been validated.
  */
 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 {
         struct vmw_resource_val_node *val;
         int ret;
 
         list_for_each_entry(val, &sw_context->resource_list, head) {
                 struct vmw_resource *res = val->res;
+                struct vmw_dma_buffer *backup = res->backup;
 
                 ret = vmw_resource_validate(res);
                 if (unlikely(ret != 0)) {
                         if (ret != -ERESTARTSYS)
                                 DRM_ERROR("Failed to validate resource.\n");
                         return ret;
                 }
+
+                /* Check if the resource switched backup buffer */
+                if (backup && res->backup && (backup != res->backup)) {
+                        struct vmw_dma_buffer *vbo = res->backup;
+
+                        ret = vmw_bo_to_validate_list
+                                (sw_context, vbo,
+                                 vmw_resource_needs_backup(res), NULL);
+                        if (ret) {
+                                ttm_bo_unreserve(&vbo->base);
+                                return ret;
+                        }
+                }
         }
         return 0;
 }
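
vmw_resources_reserve() and vmw_resources_validate() bracket a submission together with vmw_resources_unreserve() earlier in this diff. A rough sketch of the assumed caller ordering on the Rev 6296 side (the labels and surrounding control flow are hypothetical):

```c
/* Hypothetical execbuf ordering -- a sketch, not code from either revision. */
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
        goto out_backoff;

/* TTM validation of the buffers gathered on validate_nodes goes here. */

ret = vmw_resources_validate(sw_context);
if (unlikely(ret != 0))
        goto out_backoff;

/* ... apply relocations, submit the command buffer, fence it ... */

vmw_resources_unreserve(sw_context, false);     /* success: commit bindings */
return 0;

out_backoff:
vmw_resources_unreserve(sw_context, true);      /* failure: back off */
return ret;
```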
 
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
  * used for this resource is returned here.
  */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
-                                 enum vmw_res_type res_type,
                                  uint32_t *id_loc,
                                  struct vmw_resource *res,
                                  struct vmw_resource_val_node **p_val)
 {
         int ret;
         struct vmw_resource_val_node *node;
 
         *p_val = NULL;
         ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                           res,
                                           id_loc - sw_context->buf_start);
         if (unlikely(ret != 0))
-                goto out_err;
+                return ret;
 
         ret = vmw_resource_val_add(sw_context, res, &node);
         if (unlikely(ret != 0))
-                goto out_err;
-
-        if (res_type == vmw_res_context && dev_priv->has_mob &&
-            node->first_usage) {
-
-                /*
-                 * Put contexts first on the list to be able to exit
-                 * list traversal for contexts early.
-                 */
-                list_del(&node->head);
-                list_add(&node->head, &sw_context->resource_list);
-
-                ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-                if (unlikely(ret != 0))
-                        goto out_err;
-                node->staged_bindings =
-                        kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-                if (node->staged_bindings == NULL) {
-                        DRM_ERROR("Failed to allocate context binding "
-                                  "information.\n");
-                        goto out_err;
-                }
-                INIT_LIST_HEAD(&node->staged_bindings->list);
-        }
+                return ret;
 
         if (p_val)
                 *p_val = node;
 
-out_err:
-        return ret;
+        return 0;
 }
 
 
 /**
  * vmw_cmd_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
  * @converter: User-space visible type specific information.
  * @id_loc: Pointer to the location in the command buffer currently being
  * parsed from where the user-space resource id handle is located.
  * @p_val: Pointer to pointer to resource validation node. Populated
  * on exit.
  */
 static int
 vmw_cmd_res_check(struct vmw_private *dev_priv,
                   struct vmw_sw_context *sw_context,
                   enum vmw_res_type res_type,
                   const struct vmw_user_resource_conv *converter,
                   uint32_t *id_loc,
                   struct vmw_resource_val_node **p_val)
 {
         struct vmw_res_cache_entry *rcache =
                 &sw_context->res_cache[res_type];
         struct vmw_resource *res;
         struct vmw_resource_val_node *node;
         int ret;
 
         if (*id_loc == SVGA3D_INVALID_ID) {
                 if (p_val)
                         *p_val = NULL;
                 if (res_type == vmw_res_context) {
                         DRM_ERROR("Illegal context invalid id.\n");
                         return -EINVAL;
                 }
                 return 0;
         }
 
         /*
          * Fastpath in case of repeated commands referencing the same
          * resource
          */
 
         if (likely(rcache->valid && *id_loc == rcache->handle)) {
                 const struct vmw_resource *res = rcache->res;
 
                 rcache->node->first_usage = false;
                 if (p_val)
                         *p_val = rcache->node;
 
                 return vmw_resource_relocation_add
                         (&sw_context->res_relocations, res,
                          id_loc - sw_context->buf_start);
         }
 
         ret = vmw_user_resource_lookup_handle(dev_priv,
                                               sw_context->fp->tfile,
                                               *id_loc,
                                               converter,
                                               &res);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Could not find or use resource 0x%08x.\n",
                           (unsigned) *id_loc);
-                .. dump_stack();
+                // dump_stack();
                 return ret;
         }
 
         rcache->valid = true;
         rcache->res = res;
         rcache->handle = *id_loc;
 
-        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                     res, &node);
         if (unlikely(ret != 0))
                 goto out_no_reloc;
 
         rcache->node = node;
         if (p_val)
                 *p_val = node;
         vmw_resource_unreference(&res);
         return 0;
 
 out_no_reloc:
         BUG_ON(sw_context->error_resource != NULL);
         sw_context->error_resource = res;
 
         return ret;
 }
 
+/**
+ * vmw_rebind_all_dx_query - Rebind DX query associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+        struct vmw_private *dev_priv = ctx_res->dev_priv;
+        struct vmw_dma_buffer *dx_query_mob;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDXBindAllQuery body;
+        } *cmd;
+
+        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+                return 0;
+
+        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+        if (cmd == NULL) {
+                DRM_ERROR("Failed to rebind queries.\n");
+                return -ENOMEM;
+        }
+
+        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+        cmd->header.size = sizeof(cmd->body);
+        cmd->body.cid = ctx_res->id;
+        cmd->body.mobid = dx_query_mob->base.mem.start;
+        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+        vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+        return 0;
+}
+
 /**
  * vmw_rebind_contexts - Rebind all resources previously bound to
  * referenced contexts.
  *
  * @sw_context: Pointer to the software context.
  *
  * Rebind context binding points that have been scrubbed because of eviction.
  */
 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 {
         struct vmw_resource_val_node *val;
         int ret;
 
         list_for_each_entry(val, &sw_context->resource_list, head) {
                 if (unlikely(!val->staged_bindings))
                         break;
 
-                ret = vmw_context_rebind_all(val->res);
+                ret = vmw_binding_rebind_all
+                        (vmw_context_binding_state(val->res));
                 if (unlikely(ret != 0)) {
                         if (ret != -ERESTARTSYS)
                                 DRM_ERROR("Failed to rebind context.\n");
                         return ret;
                 }
+
+                ret = vmw_rebind_all_dx_query(val->res);
+                if (ret != 0)
+                        return ret;
         }
 
         return 0;
 }
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+                                 enum vmw_view_type view_type,
+                                 enum vmw_ctx_binding_type binding_type,
+                                 uint32 shader_slot,
+                                 uint32 view_ids[], u32 num_views,
+                                 u32 first_slot)
+{
+        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_cmdbuf_res_manager *man;
+        u32 i;
+        int ret;
+
+        if (!ctx_node) {
+                DRM_ERROR("DX Context not set.\n");
+                return -EINVAL;
+        }
+
+        man = sw_context->man;
+        for (i = 0; i < num_views; ++i) {
+                struct vmw_ctx_bindinfo_view binding;
+                struct vmw_resource *view = NULL;
+
+                if (view_ids[i] != SVGA3D_INVALID_ID) {
+                        view = vmw_view_lookup(man, view_type, view_ids[i]);
+                        if (IS_ERR(view)) {
+                                DRM_ERROR("View not found.\n");
+                                return PTR_ERR(view);
+                        }
+
+                        ret = vmw_view_res_val_add(sw_context, view);
+                        if (ret) {
+                                DRM_ERROR("Could not add view to "
+                                          "validation list.\n");
+                                vmw_resource_unreference(&view);
+                                return ret;
+                        }
+                }
+                binding.bi.ctx = ctx_node->res;
+                binding.bi.res = view;
+                binding.bi.bt = binding_type;
+                binding.shader_slot = shader_slot;
+                binding.slot = first_slot + i;
+                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+                                shader_slot, binding.slot);
+                if (view)
+                        vmw_resource_unreference(&view);
+        }
+
+        return 0;
+}
 
 /**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @header: A command header with an embedded user-space context handle.
  *
  * Convenience function: Call vmw_cmd_res_check with the user-space context
  * handle embedded in @header.
  */
 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
 {
         struct vmw_cid_cmd {
                 SVGA3dCmdHeader header;
                 uint32_t cid;
         } *cmd;
 
         cmd = container_of(header, struct vmw_cid_cmd, header);
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                  user_context_converter, &cmd->cid, NULL);
 }
629 | 920 | ||
630 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, |
921 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, |
631 | struct vmw_sw_context *sw_context, |
922 | struct vmw_sw_context *sw_context, |
632 | SVGA3dCmdHeader *header) |
923 | SVGA3dCmdHeader *header) |
633 | { |
924 | { |
634 | struct vmw_sid_cmd { |
925 | struct vmw_sid_cmd { |
635 | SVGA3dCmdHeader header; |
926 | SVGA3dCmdHeader header; |
636 | SVGA3dCmdSetRenderTarget body; |
927 | SVGA3dCmdSetRenderTarget body; |
637 | } *cmd; |
928 | } *cmd; |
638 | struct vmw_resource_val_node *ctx_node; |
929 | struct vmw_resource_val_node *ctx_node; |
639 | struct vmw_resource_val_node *res_node; |
930 | struct vmw_resource_val_node *res_node; |
640 | int ret; |
931 | int ret; |
641 | 932 | ||
642 | cmd = container_of(header, struct vmw_sid_cmd, header); |
933 | cmd = container_of(header, struct vmw_sid_cmd, header); |
- | 934 | ||
- | 935 | if (cmd->body.type >= SVGA3D_RT_MAX) { |
|
- | 936 | DRM_ERROR("Illegal render target type %u.\n", |
|
- | 937 | (unsigned) cmd->body.type); |
|
- | 938 | return -EINVAL; |
|
- | 939 | } |
|
643 | 940 | ||
644 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
941 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
645 | user_context_converter, &cmd->body.cid, |
942 | user_context_converter, &cmd->body.cid, |
646 | &ctx_node); |
943 | &ctx_node); |
647 | if (unlikely(ret != 0)) |
944 | if (unlikely(ret != 0)) |
648 | return ret; |
945 | return ret; |
649 | 946 | ||
650 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
947 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
651 | user_surface_converter, |
948 | user_surface_converter, |
652 | &cmd->body.target.sid, &res_node); |
949 | &cmd->body.target.sid, &res_node); |
653 | if (unlikely(ret != 0)) |
950 | if (unlikely(ret != 0)) |
654 | return ret; |
951 | return ret; |
655 | 952 | ||
656 | if (dev_priv->has_mob) { |
953 | if (dev_priv->has_mob) { |
657 | struct vmw_ctx_bindinfo bi; |
954 | struct vmw_ctx_bindinfo_view binding; |
658 | 955 | ||
659 | bi.ctx = ctx_node->res; |
956 | binding.bi.ctx = ctx_node->res; |
660 | bi.res = res_node ? res_node->res : NULL; |
957 | binding.bi.res = res_node ? res_node->res : NULL; |
661 | bi.bt = vmw_ctx_binding_rt; |
958 | binding.bi.bt = vmw_ctx_binding_rt; |
662 | bi.i1.rt_type = cmd->body.type; |
959 | binding.slot = cmd->body.type; |
- | 960 | vmw_binding_add(ctx_node->staged_bindings, |
663 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); |
961 | &binding.bi, 0, binding.slot); |
664 | } |
962 | } |
665 | 963 | ||
666 | return 0; |
964 | return 0; |
667 | } |
965 | } |
668 | 966 | ||
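Note that on MOB-capable hardware the render-target bind above is not applied immediately: it is recorded in ctx_node->staged_bindings and replayed when the context itself is validated. A toy model of that deferred bookkeeping, with invented field names (the real vmw_ctx_bindinfo carries considerably more state):

#include <stdio.h>

enum bind_type { BIND_RT };		/* illustrative */

struct staged_binding {
	enum bind_type bt;
	int res;			/* resource id, -1 = unbound */
	unsigned int slot;
};

#define NUM_SLOTS 8

static struct staged_binding staged[NUM_SLOTS];

/* Record what the command stream asked for; nothing hits the device yet. */
static void binding_add(enum bind_type bt, int res, unsigned int slot)
{
	staged[slot] = (struct staged_binding){ bt, res, slot };
}

/* Replayed later, when the context is validated. */
static void binding_commit(void)
{
	for (unsigned int i = 0; i < NUM_SLOTS; ++i)
		if (staged[i].res >= 0)
			printf("bind rt %d to slot %u\n", staged[i].res, i);
}

int main(void)
{
	for (unsigned int i = 0; i < NUM_SLOTS; ++i)
		staged[i].res = -1;

	binding_add(BIND_RT, 7, 0);	/* e.g. SET_RENDER_TARGET, slot 0 */
	binding_add(BIND_RT, 9, 1);
	binding_commit();
	return 0;
}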
669 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
967 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
670 | struct vmw_sw_context *sw_context, |
968 | struct vmw_sw_context *sw_context, |
671 | SVGA3dCmdHeader *header) |
969 | SVGA3dCmdHeader *header) |
672 | { |
970 | { |
673 | struct vmw_sid_cmd { |
971 | struct vmw_sid_cmd { |
674 | SVGA3dCmdHeader header; |
972 | SVGA3dCmdHeader header; |
675 | SVGA3dCmdSurfaceCopy body; |
973 | SVGA3dCmdSurfaceCopy body; |
676 | } *cmd; |
974 | } *cmd; |
677 | int ret; |
975 | int ret; |
678 | 976 | ||
679 | cmd = container_of(header, struct vmw_sid_cmd, header); |
977 | cmd = container_of(header, struct vmw_sid_cmd, header); |
- | 978 | ||
680 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
979 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
681 | user_surface_converter, |
980 | user_surface_converter, |
682 | &cmd->body.src.sid, NULL); |
981 | &cmd->body.src.sid, NULL); |
683 | if (unlikely(ret != 0)) |
982 | if (ret) |
684 | return ret; |
983 | return ret; |
- | 984 | ||
685 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
985 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
686 | user_surface_converter, |
986 | user_surface_converter, |
687 | &cmd->body.dest.sid, NULL); |
987 | &cmd->body.dest.sid, NULL); |
688 | } |
988 | } |
- | 989 | ||
- | 990 | static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv, |
- | 991 | struct vmw_sw_context *sw_context, |
- | 992 | SVGA3dCmdHeader *header) |
- | 993 | { |
- | 994 | struct { |
- | 995 | SVGA3dCmdHeader header; |
- | 996 | SVGA3dCmdDXBufferCopy body; |
- | 997 | } *cmd; |
- | 998 | int ret; |
- | 999 | ||
- | 1000 | cmd = container_of(header, typeof(*cmd), header); |
- | 1001 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
- | 1002 | user_surface_converter, |
- | 1003 | &cmd->body.src, NULL); |
- | 1004 | if (ret != 0) |
- | 1005 | return ret; |
- | 1006 | ||
- | 1007 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
- | 1008 | user_surface_converter, |
- | 1009 | &cmd->body.dest, NULL); |
- | 1010 | } |
- | 1011 | ||
- | 1012 | static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv, |
- | 1013 | struct vmw_sw_context *sw_context, |
- | 1014 | SVGA3dCmdHeader *header) |
- | 1015 | { |
- | 1016 | struct { |
- | 1017 | SVGA3dCmdHeader header; |
- | 1018 | SVGA3dCmdDXPredCopyRegion body; |
- | 1019 | } *cmd; |
- | 1020 | int ret; |
- | 1021 | ||
- | 1022 | cmd = container_of(header, typeof(*cmd), header); |
- | 1023 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
- | 1024 | user_surface_converter, |
- | 1025 | &cmd->body.srcSid, NULL); |
- | 1026 | if (ret != 0) |
- | 1027 | return ret; |
- | 1028 | ||
- | 1029 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
- | 1030 | user_surface_converter, |
- | 1031 | &cmd->body.dstSid, NULL); |
- | 1032 | } |
689 | 1033 | ||
690 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, |
1034 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, |
691 | struct vmw_sw_context *sw_context, |
1035 | struct vmw_sw_context *sw_context, |
692 | SVGA3dCmdHeader *header) |
1036 | SVGA3dCmdHeader *header) |
693 | { |
1037 | { |
694 | struct vmw_sid_cmd { |
1038 | struct vmw_sid_cmd { |
695 | SVGA3dCmdHeader header; |
1039 | SVGA3dCmdHeader header; |
696 | SVGA3dCmdSurfaceStretchBlt body; |
1040 | SVGA3dCmdSurfaceStretchBlt body; |
697 | } *cmd; |
1041 | } *cmd; |
698 | int ret; |
1042 | int ret; |
699 | 1043 | ||
700 | cmd = container_of(header, struct vmw_sid_cmd, header); |
1044 | cmd = container_of(header, struct vmw_sid_cmd, header); |
701 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1045 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
702 | user_surface_converter, |
1046 | user_surface_converter, |
703 | &cmd->body.src.sid, NULL); |
1047 | &cmd->body.src.sid, NULL); |
704 | if (unlikely(ret != 0)) |
1048 | if (unlikely(ret != 0)) |
705 | return ret; |
1049 | return ret; |
706 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1050 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
707 | user_surface_converter, |
1051 | user_surface_converter, |
708 | &cmd->body.dest.sid, NULL); |
1052 | &cmd->body.dest.sid, NULL); |
709 | } |
1053 | } |
710 | 1054 | ||
711 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, |
1055 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, |
712 | struct vmw_sw_context *sw_context, |
1056 | struct vmw_sw_context *sw_context, |
713 | SVGA3dCmdHeader *header) |
1057 | SVGA3dCmdHeader *header) |
714 | { |
1058 | { |
715 | struct vmw_sid_cmd { |
1059 | struct vmw_sid_cmd { |
716 | SVGA3dCmdHeader header; |
1060 | SVGA3dCmdHeader header; |
717 | SVGA3dCmdBlitSurfaceToScreen body; |
1061 | SVGA3dCmdBlitSurfaceToScreen body; |
718 | } *cmd; |
1062 | } *cmd; |
719 | 1063 | ||
720 | cmd = container_of(header, struct vmw_sid_cmd, header); |
1064 | cmd = container_of(header, struct vmw_sid_cmd, header); |
721 | 1065 | ||
722 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1066 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
723 | user_surface_converter, |
1067 | user_surface_converter, |
724 | &cmd->body.srcImage.sid, NULL); |
1068 | &cmd->body.srcImage.sid, NULL); |
725 | } |
1069 | } |
726 | 1070 | ||
727 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, |
1071 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, |
728 | struct vmw_sw_context *sw_context, |
1072 | struct vmw_sw_context *sw_context, |
729 | SVGA3dCmdHeader *header) |
1073 | SVGA3dCmdHeader *header) |
730 | { |
1074 | { |
731 | struct vmw_sid_cmd { |
1075 | struct vmw_sid_cmd { |
732 | SVGA3dCmdHeader header; |
1076 | SVGA3dCmdHeader header; |
733 | SVGA3dCmdPresent body; |
1077 | SVGA3dCmdPresent body; |
734 | } *cmd; |
1078 | } *cmd; |
735 | 1079 | ||
736 | 1080 | ||
737 | cmd = container_of(header, struct vmw_sid_cmd, header); |
1081 | cmd = container_of(header, struct vmw_sid_cmd, header); |
738 | 1082 | ||
739 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1083 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
740 | user_surface_converter, &cmd->body.sid, |
1084 | user_surface_converter, &cmd->body.sid, |
741 | NULL); |
1085 | NULL); |
742 | } |
1086 | } |
743 | 1087 | ||
744 | /** |
1088 | /** |
745 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. |
1089 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. |
746 | * |
1090 | * |
747 | * @dev_priv: The device private structure. |
1091 | * @dev_priv: The device private structure. |
748 | * @new_query_bo: The new buffer holding query results. |
1092 | * @new_query_bo: The new buffer holding query results. |
749 | * @sw_context: The software context used for this command submission. |
1093 | * @sw_context: The software context used for this command submission. |
750 | * |
1094 | * |
751 | * This function checks whether @new_query_bo is suitable for holding |
1095 | * This function checks whether @new_query_bo is suitable for holding |
752 | * query results, and whether another buffer is currently pinned for query |
1096 | * query results, and whether another buffer is currently pinned for query |
753 | * results. If so, the function prepares the state of @sw_context for |
1097 | * results. If so, the function prepares the state of @sw_context for |
754 | * switching pinned buffers after successful submission of the current |
1098 | * switching pinned buffers after successful submission of the current |
755 | * command batch. |
1099 | * command batch. |
756 | */ |
1100 | */ |
757 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, |
1101 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, |
758 | struct ttm_buffer_object *new_query_bo, |
1102 | struct vmw_dma_buffer *new_query_bo, |
759 | struct vmw_sw_context *sw_context) |
1103 | struct vmw_sw_context *sw_context) |
760 | { |
1104 | { |
761 | struct vmw_res_cache_entry *ctx_entry = |
1105 | struct vmw_res_cache_entry *ctx_entry = |
762 | &sw_context->res_cache[vmw_res_context]; |
1106 | &sw_context->res_cache[vmw_res_context]; |
763 | int ret; |
1107 | int ret; |
764 | 1108 | ||
765 | BUG_ON(!ctx_entry->valid); |
1109 | BUG_ON(!ctx_entry->valid); |
766 | sw_context->last_query_ctx = ctx_entry->res; |
1110 | sw_context->last_query_ctx = ctx_entry->res; |
767 | 1111 | ||
768 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { |
1112 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { |
769 | 1113 | ||
770 | if (unlikely(new_query_bo->num_pages > 4)) { |
1114 | if (unlikely(new_query_bo->base.num_pages > 4)) { |
771 | DRM_ERROR("Query buffer too large.\n"); |
1115 | DRM_ERROR("Query buffer too large.\n"); |
772 | return -EINVAL; |
1116 | return -EINVAL; |
773 | } |
1117 | } |
774 | 1118 | ||
775 | if (unlikely(sw_context->cur_query_bo != NULL)) { |
1119 | if (unlikely(sw_context->cur_query_bo != NULL)) { |
776 | sw_context->needs_post_query_barrier = true; |
1120 | sw_context->needs_post_query_barrier = true; |
777 | ret = vmw_bo_to_validate_list(sw_context, |
1121 | ret = vmw_bo_to_validate_list(sw_context, |
778 | sw_context->cur_query_bo, |
1122 | sw_context->cur_query_bo, |
779 | dev_priv->has_mob, NULL); |
1123 | dev_priv->has_mob, NULL); |
780 | if (unlikely(ret != 0)) |
1124 | if (unlikely(ret != 0)) |
781 | return ret; |
1125 | return ret; |
782 | } |
1126 | } |
783 | sw_context->cur_query_bo = new_query_bo; |
1127 | sw_context->cur_query_bo = new_query_bo; |
784 | 1128 | ||
785 | ret = vmw_bo_to_validate_list(sw_context, |
1129 | ret = vmw_bo_to_validate_list(sw_context, |
786 | dev_priv->dummy_query_bo, |
1130 | dev_priv->dummy_query_bo, |
787 | dev_priv->has_mob, NULL); |
1131 | dev_priv->has_mob, NULL); |
788 | if (unlikely(ret != 0)) |
1132 | if (unlikely(ret != 0)) |
789 | return ret; |
1133 | return ret; |
790 | 1134 | ||
791 | } |
1135 | } |
792 | 1136 | ||
793 | return 0; |
1137 | return 0; |
794 | } |
1138 | } |
795 | 1139 | ||
796 | 1140 | ||
797 | /** |
1141 | /** |
798 | * vmw_query_bo_switch_commit - Finalize switching pinned query buffer |
1142 | * vmw_query_bo_switch_commit - Finalize switching pinned query buffer |
799 | * |
1143 | * |
800 | * @dev_priv: The device private structure. |
1144 | * @dev_priv: The device private structure. |
801 | * @sw_context: The software context used for this command submission batch. |
1145 | * @sw_context: The software context used for this command submission batch. |
802 | * |
1146 | * |
803 | * This function will check if we're switching query buffers, and will then |
1147 | * This function will check if we're switching query buffers, and will then |
804 | * issue a dummy occlusion query wait used as a query barrier. When the fence |
1148 | * issue a dummy occlusion query wait used as a query barrier. When the fence |
805 | * object following that query wait has signaled, we are sure that all |
1149 | * object following that query wait has signaled, we are sure that all |
806 | * preceding queries have finished, and the old query buffer can be unpinned. |
1150 | * preceding queries have finished, and the old query buffer can be unpinned. |
807 | * However, since both the new query buffer and the old one are fenced with |
1151 | * However, since both the new query buffer and the old one are fenced with |
808 | * that fence, we can do an asynchronous unpin now, and be sure that the |
1152 | * that fence, we can do an asynchronous unpin now, and be sure that the |
809 | * old query buffer won't be moved until the fence has signaled. |
1153 | * old query buffer won't be moved until the fence has signaled. |
810 | * |
1154 | * |
811 | * As mentioned above, both the new and the old query buffers need to be fenced |
1155 | * As mentioned above, both the new and the old query buffers need to be fenced |
812 | * using a sequence emitted *after* calling this function. |
1156 | * using a sequence emitted *after* calling this function. |
813 | */ |
1157 | */ |
814 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, |
1158 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, |
815 | struct vmw_sw_context *sw_context) |
1159 | struct vmw_sw_context *sw_context) |
816 | { |
1160 | { |
817 | /* |
1161 | /* |
818 | * The validate list should still hold references to all |
1162 | * The validate list should still hold references to all |
819 | * contexts here. |
1163 | * contexts here. |
820 | */ |
1164 | */ |
821 | 1165 | ||
822 | if (sw_context->needs_post_query_barrier) { |
1166 | if (sw_context->needs_post_query_barrier) { |
823 | struct vmw_res_cache_entry *ctx_entry = |
1167 | struct vmw_res_cache_entry *ctx_entry = |
824 | &sw_context->res_cache[vmw_res_context]; |
1168 | &sw_context->res_cache[vmw_res_context]; |
825 | struct vmw_resource *ctx; |
1169 | struct vmw_resource *ctx; |
826 | int ret; |
1170 | int ret; |
827 | 1171 | ||
828 | BUG_ON(!ctx_entry->valid); |
1172 | BUG_ON(!ctx_entry->valid); |
829 | ctx = ctx_entry->res; |
1173 | ctx = ctx_entry->res; |
830 | 1174 | ||
831 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); |
1175 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); |
832 | 1176 | ||
833 | if (unlikely(ret != 0)) |
1177 | if (unlikely(ret != 0)) |
834 | DRM_ERROR("Out of fifo space for dummy query.\n"); |
1178 | DRM_ERROR("Out of fifo space for dummy query.\n"); |
835 | } |
1179 | } |
836 | 1180 | ||
837 | if (dev_priv->pinned_bo != sw_context->cur_query_bo) { |
1181 | if (dev_priv->pinned_bo != sw_context->cur_query_bo) { |
838 | if (dev_priv->pinned_bo) { |
1182 | if (dev_priv->pinned_bo) { |
839 | vmw_bo_pin(dev_priv->pinned_bo, false); |
1183 | vmw_bo_pin_reserved(dev_priv->pinned_bo, false); |
840 | ttm_bo_unref(&dev_priv->pinned_bo); |
1184 | vmw_dmabuf_unreference(&dev_priv->pinned_bo); |
841 | } |
1185 | } |
842 | 1186 | ||
843 | if (!sw_context->needs_post_query_barrier) { |
1187 | if (!sw_context->needs_post_query_barrier) { |
844 | vmw_bo_pin(sw_context->cur_query_bo, true); |
1188 | vmw_bo_pin_reserved(sw_context->cur_query_bo, true); |
845 | 1189 | ||
846 | /* |
1190 | /* |
847 | * We also pin the dummy_query_bo buffer so that we |
1191 | * We also pin the dummy_query_bo buffer so that we |
848 | * don't need to validate it when emitting |
1192 | * don't need to validate it when emitting |
849 | * dummy queries in context destroy paths. |
1193 | * dummy queries in context destroy paths. |
850 | */ |
1194 | */ |
- | 1195 | ||
851 | 1196 | if (!dev_priv->dummy_query_bo_pinned) { |
- | 1197 | vmw_bo_pin_reserved(dev_priv->dummy_query_bo, |
852 | vmw_bo_pin(dev_priv->dummy_query_bo, true); |
1198 | true); |
- | 1199 | dev_priv->dummy_query_bo_pinned = true; |
853 | dev_priv->dummy_query_bo_pinned = true; |
1200 | } |
854 | 1201 | ||
855 | BUG_ON(sw_context->last_query_ctx == NULL); |
1202 | BUG_ON(sw_context->last_query_ctx == NULL); |
856 | dev_priv->query_cid = sw_context->last_query_ctx->id; |
1203 | dev_priv->query_cid = sw_context->last_query_ctx->id; |
857 | dev_priv->query_cid_valid = true; |
1204 | dev_priv->query_cid_valid = true; |
858 | dev_priv->pinned_bo = |
1205 | dev_priv->pinned_bo = |
859 | ttm_bo_reference(sw_context->cur_query_bo); |
1206 | vmw_dmabuf_reference(sw_context->cur_query_bo); |
860 | } |
1207 | } |
861 | } |
1208 | } |
862 | } |
1209 | } |
863 | 1210 | ||
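Taken together, vmw_query_bo_switch_prepare and vmw_query_bo_switch_commit form a small state machine: prepare runs while the batch is validated, commit runs after the batch and its fence have been submitted. A stripped-down model of the control flow, using plain ints for buffers (illustrative only; the real code pins TTM-backed buffers and emits an actual dummy occlusion query as the barrier):

#include <stdbool.h>
#include <stdio.h>

/* Buffers are plain ints here; 0 means "none". */
struct qstate {
	int cur_query_bo;
	int pinned_bo;
	bool needs_barrier;
};

/* Runs while the batch is validated. */
static void switch_prepare(struct qstate *s, int new_bo)
{
	if (new_bo != s->cur_query_bo) {
		/* An older buffer is still in flight; a barrier must be
		 * emitted before it may be unpinned. */
		if (s->cur_query_bo)
			s->needs_barrier = true;
		s->cur_query_bo = new_bo;
	}
}

/* Runs after the batch (and its fence) has been submitted. */
static void switch_commit(struct qstate *s)
{
	if (s->needs_barrier)
		printf("emit dummy query barrier\n");

	if (s->pinned_bo != s->cur_query_bo) {
		if (s->pinned_bo) {
			printf("async unpin of bo %d (fenced)\n", s->pinned_bo);
			s->pinned_bo = 0;
		}
		if (!s->needs_barrier) {
			printf("pin bo %d\n", s->cur_query_bo);
			s->pinned_bo = s->cur_query_bo;
		}
	}
	s->needs_barrier = false;	/* per-batch flag in the real sw_context */
}

int main(void)
{
	struct qstate s = { 0, 0, false };

	switch_prepare(&s, 1);	/* first batch ends a query in bo 1 */
	switch_commit(&s);
	switch_prepare(&s, 2);	/* a later batch switches to bo 2 */
	switch_commit(&s);
	return 0;
}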
864 | /** |
1211 | /** |
865 | * vmw_translate_mob_pointer - Prepare to translate a user-space buffer |
1212 | * vmw_translate_mob_pointer - Prepare to translate a user-space buffer |
866 | * handle to a MOB id. |
1213 | * handle to a MOB id. |
867 | * |
1214 | * |
868 | * @dev_priv: Pointer to a device private structure. |
1215 | * @dev_priv: Pointer to a device private structure. |
869 | * @sw_context: The software context used for this command batch validation. |
1216 | * @sw_context: The software context used for this command batch validation. |
870 | * @id: Pointer to the user-space handle to be translated. |
1217 | * @id: Pointer to the user-space handle to be translated. |
871 | * @vmw_bo_p: Points to a location that, on successful return, will carry |
1218 | * @vmw_bo_p: Points to a location that, on successful return, will carry |
872 | * a reference-counted pointer to the DMA buffer identified by the |
1219 | * a reference-counted pointer to the DMA buffer identified by the |
873 | * user-space handle in @id. |
1220 | * user-space handle in @id. |
874 | * |
1221 | * |
875 | * This function saves information needed to translate a user-space buffer |
1222 | * This function saves information needed to translate a user-space buffer |
876 | * handle to a MOB id. The translation does not take place immediately, but |
1223 | * handle to a MOB id. The translation does not take place immediately, but |
877 | * during a call to vmw_apply_relocations(). This function builds a relocation |
1224 | * during a call to vmw_apply_relocations(). This function builds a relocation |
878 | * list and a list of buffers to validate. The former needs to be freed using |
1225 | * list and a list of buffers to validate. The former needs to be freed using |
879 | * either vmw_apply_relocations() or vmw_free_relocations(). The latter |
1226 | * either vmw_apply_relocations() or vmw_free_relocations(). The latter |
880 | * needs to be freed using vmw_clear_validations. |
1227 | * needs to be freed using vmw_clear_validations. |
881 | */ |
1228 | */ |
882 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, |
1229 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, |
883 | struct vmw_sw_context *sw_context, |
1230 | struct vmw_sw_context *sw_context, |
884 | SVGAMobId *id, |
1231 | SVGAMobId *id, |
885 | struct vmw_dma_buffer **vmw_bo_p) |
1232 | struct vmw_dma_buffer **vmw_bo_p) |
886 | { |
1233 | { |
887 | struct vmw_dma_buffer *vmw_bo = NULL; |
1234 | struct vmw_dma_buffer *vmw_bo = NULL; |
888 | struct ttm_buffer_object *bo; |
- | |
889 | uint32_t handle = *id; |
1235 | uint32_t handle = *id; |
890 | struct vmw_relocation *reloc; |
1236 | struct vmw_relocation *reloc; |
891 | int ret; |
1237 | int ret; |
892 | 1238 | ||
- | 1239 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, |
893 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
1240 | NULL); |
894 | if (unlikely(ret != 0)) { |
1241 | if (unlikely(ret != 0)) { |
895 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
1242 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
- | 1243 | ret = -EINVAL; |
896 | return -EINVAL; |
1244 | goto out_no_reloc; |
897 | } |
- | |
898 | bo = &vmw_bo->base; |
1245 | } |
899 | 1246 | ||
900 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
1247 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
901 | DRM_ERROR("Max number relocations per submission" |
1248 | DRM_ERROR("Max number relocations per submission" |
902 | " exceeded\n"); |
1249 | " exceeded\n"); |
903 | ret = -EINVAL; |
1250 | ret = -EINVAL; |
904 | goto out_no_reloc; |
1251 | goto out_no_reloc; |
905 | } |
1252 | } |
906 | 1253 | ||
907 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
1254 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
908 | reloc->mob_loc = id; |
1255 | reloc->mob_loc = id; |
909 | reloc->location = NULL; |
1256 | reloc->location = NULL; |
910 | 1257 | ||
911 | ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); |
1258 | ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index); |
912 | if (unlikely(ret != 0)) |
1259 | if (unlikely(ret != 0)) |
913 | goto out_no_reloc; |
1260 | goto out_no_reloc; |
914 | 1261 | ||
915 | *vmw_bo_p = vmw_bo; |
1262 | *vmw_bo_p = vmw_bo; |
916 | return 0; |
1263 | return 0; |
917 | 1264 | ||
918 | out_no_reloc: |
1265 | out_no_reloc: |
919 | vmw_dmabuf_unreference(&vmw_bo); |
1266 | vmw_dmabuf_unreference(&vmw_bo); |
920 | vmw_bo_p = NULL; |
1267 | *vmw_bo_p = NULL; |
921 | return ret; |
1268 | return ret; |
922 | } |
1269 | } |
923 | 1270 | ||
924 | /** |
1271 | /** |
925 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
1272 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
926 | * handle to a valid SVGAGuestPtr |
1273 | * handle to a valid SVGAGuestPtr |
927 | * |
1274 | * |
928 | * @dev_priv: Pointer to a device private structure. |
1275 | * @dev_priv: Pointer to a device private structure. |
929 | * @sw_context: The software context used for this command batch validation. |
1276 | * @sw_context: The software context used for this command batch validation. |
930 | * @ptr: Pointer to the user-space handle to be translated. |
1277 | * @ptr: Pointer to the user-space handle to be translated. |
931 | * @vmw_bo_p: Points to a location that, on successful return, will carry |
1278 | * @vmw_bo_p: Points to a location that, on successful return, will carry |
932 | * a reference-counted pointer to the DMA buffer identified by the |
1279 | * a reference-counted pointer to the DMA buffer identified by the |
933 | * user-space handle in @ptr. |
1280 | * user-space handle in @ptr. |
934 | * |
1281 | * |
935 | * This function saves information needed to translate a user-space buffer |
1282 | * This function saves information needed to translate a user-space buffer |
936 | * handle to a valid SVGAGuestPtr. The translation does not take place |
1283 | * handle to a valid SVGAGuestPtr. The translation does not take place |
937 | * immediately, but during a call to vmw_apply_relocations(). |
1284 | * immediately, but during a call to vmw_apply_relocations(). |
938 | * This function builds a relocation list and a list of buffers to validate. |
1285 | * This function builds a relocation list and a list of buffers to validate. |
939 | * The former needs to be freed using either vmw_apply_relocations() or |
1286 | * The former needs to be freed using either vmw_apply_relocations() or |
940 | * vmw_free_relocations(). The latter needs to be freed using |
1287 | * vmw_free_relocations(). The latter needs to be freed using |
941 | * vmw_clear_validations. |
1288 | * vmw_clear_validations. |
942 | */ |
1289 | */ |
943 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
1290 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
944 | struct vmw_sw_context *sw_context, |
1291 | struct vmw_sw_context *sw_context, |
945 | SVGAGuestPtr *ptr, |
1292 | SVGAGuestPtr *ptr, |
946 | struct vmw_dma_buffer **vmw_bo_p) |
1293 | struct vmw_dma_buffer **vmw_bo_p) |
947 | { |
1294 | { |
948 | struct vmw_dma_buffer *vmw_bo = NULL; |
1295 | struct vmw_dma_buffer *vmw_bo = NULL; |
949 | struct ttm_buffer_object *bo; |
- | |
950 | uint32_t handle = ptr->gmrId; |
1296 | uint32_t handle = ptr->gmrId; |
951 | struct vmw_relocation *reloc; |
1297 | struct vmw_relocation *reloc; |
952 | int ret; |
1298 | int ret; |
953 | 1299 | ||
- | 1300 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo, |
954 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
1301 | NULL); |
955 | if (unlikely(ret != 0)) { |
1302 | if (unlikely(ret != 0)) { |
956 | DRM_ERROR("Could not find or use GMR region.\n"); |
1303 | DRM_ERROR("Could not find or use GMR region.\n"); |
- | 1304 | ret = -EINVAL; |
957 | return -EINVAL; |
1305 | goto out_no_reloc; |
958 | } |
- | |
959 | bo = &vmw_bo->base; |
1306 | } |
960 | 1307 | ||
961 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
1308 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { |
962 | DRM_ERROR("Max number relocations per submission" |
1309 | DRM_ERROR("Max number relocations per submission" |
963 | " exceeded\n"); |
1310 | " exceeded\n"); |
964 | ret = -EINVAL; |
1311 | ret = -EINVAL; |
965 | goto out_no_reloc; |
1312 | goto out_no_reloc; |
966 | } |
1313 | } |
967 | 1314 | ||
968 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
1315 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
969 | reloc->location = ptr; |
1316 | reloc->location = ptr; |
970 | 1317 | ||
971 | ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); |
1318 | ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index); |
972 | if (unlikely(ret != 0)) |
1319 | if (unlikely(ret != 0)) |
973 | goto out_no_reloc; |
1320 | goto out_no_reloc; |
974 | 1321 | ||
975 | *vmw_bo_p = vmw_bo; |
1322 | *vmw_bo_p = vmw_bo; |
976 | return 0; |
1323 | return 0; |
977 | 1324 | ||
978 | out_no_reloc: |
1325 | out_no_reloc: |
979 | vmw_dmabuf_unreference(&vmw_bo); |
1326 | vmw_dmabuf_unreference(&vmw_bo); |
980 | vmw_bo_p = NULL; |
1327 | *vmw_bo_p = NULL; |
981 | return ret; |
1328 | return ret; |
982 | } |
1329 | } |
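Both translate helpers above deliberately stop short of writing anything: they only record where in the command stream a GMR or MOB id lives, and which validated buffer it refers to. The real ids are patched in by a later fixup pass (vmw_apply_relocations, outside this hunk). A self-contained sketch of that two-phase scheme, with invented placement values:

#include <stdint.h>
#include <stdio.h>

/* Phase 1 records where an id lives in the command stream and which
 * validated buffer it refers to; phase 2 patches the real id in. */
struct reloc {
	uint32_t *loc;		/* points into the command stream */
	int bo_index;		/* index into the validation list */
};

int main(void)
{
	uint32_t cmd_stream[4] = { 0x1234, 0, 0x5678, 0 };	/* ids still 0 */
	uint32_t final_id[2]  = { 17, 42 };	/* known only after validation */
	struct reloc relocs[2] = {
		{ &cmd_stream[1], 0 },
		{ &cmd_stream[3], 1 },
	};

	/* The vmw_apply_relocations-style fixup pass. */
	for (int i = 0; i < 2; ++i)
		*relocs[i].loc = final_id[relocs[i].bo_index];

	printf("patched ids: %u %u\n",
	       (unsigned)cmd_stream[1], (unsigned)cmd_stream[3]);
	return 0;
}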
- | 1330 | ||
- | 1331 | ||
- | 1332 | ||
- | 1333 | /** |
- | 1334 | * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command. |
- | 1335 | * |
- | 1336 | * @dev_priv: Pointer to a device private struct. |
- | 1337 | * @sw_context: The software context used for this command submission. |
- | 1338 | * @header: Pointer to the command header in the command stream. |
- | 1339 | * |
- | 1340 | * This function adds the new query into the query COTABLE |
- | 1341 | */ |
- | 1342 | static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, |
- | 1343 | struct vmw_sw_context *sw_context, |
- | 1344 | SVGA3dCmdHeader *header) |
- | 1345 | { |
- | 1346 | struct vmw_dx_define_query_cmd { |
- | 1347 | SVGA3dCmdHeader header; |
- | 1348 | SVGA3dCmdDXDefineQuery q; |
- | 1349 | } *cmd; |
- | 1350 | ||
- | 1351 | int ret; |
- | 1352 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; |
- | 1353 | struct vmw_resource *cotable_res; |
- | 1354 | ||
- | 1355 | ||
- | 1356 | if (ctx_node == NULL) { |
- | 1357 | DRM_ERROR("DX Context not set for query.\n"); |
- | 1358 | return -EINVAL; |
- | 1359 | } |
- | 1360 | ||
- | 1361 | cmd = container_of(header, struct vmw_dx_define_query_cmd, header); |
- | 1362 | ||
- | 1363 | if (cmd->q.type < SVGA3D_QUERYTYPE_MIN || |
- | 1364 | cmd->q.type >= SVGA3D_QUERYTYPE_MAX) |
- | 1365 | return -EINVAL; |
- | 1366 | ||
- | 1367 | cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY); |
- | 1368 | ret = vmw_cotable_notify(cotable_res, cmd->q.queryId); |
- | 1369 | vmw_resource_unreference(&cotable_res); |
- | 1370 | ||
- | 1371 | return ret; |
- | 1372 | } |
- | 1373 | ||
- | 1374 | ||
- | 1375 | ||
- | 1376 | /** |
- | 1377 | * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command. |
- | 1378 | * |
- | 1379 | * @dev_priv: Pointer to a device private struct. |
- | 1380 | * @sw_context: The software context used for this command submission. |
- | 1381 | * @header: Pointer to the command header in the command stream. |
- | 1382 | * |
- | 1383 | * The query bind operation will eventually associate the query ID |
- | 1384 | * with its backing MOB. In this function, we take the user mode |
- | 1385 | * MOB ID and use vmw_translate_mob_ptr() to translate it to its |
- | 1386 | * kernel mode equivalent. |
- | 1387 | */ |
- | 1388 | static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, |
- | 1389 | struct vmw_sw_context *sw_context, |
- | 1390 | SVGA3dCmdHeader *header) |
- | 1391 | { |
- | 1392 | struct vmw_dx_bind_query_cmd { |
- | 1393 | SVGA3dCmdHeader header; |
- | 1394 | SVGA3dCmdDXBindQuery q; |
- | 1395 | } *cmd; |
- | 1396 | ||
- | 1397 | struct vmw_dma_buffer *vmw_bo; |
- | 1398 | int ret; |
- | 1399 | ||
- | 1400 | ||
- | 1401 | cmd = container_of(header, struct vmw_dx_bind_query_cmd, header); |
- | 1402 | ||
- | 1403 | /* |
- | 1404 | * Look up the buffer pointed to by q.mobid, put it on the relocation |
- | 1405 | * list so its kernel mode MOB ID can be filled in later |
- | 1406 | */ |
- | 1407 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid, |
- | 1408 | &vmw_bo); |
- | 1409 | ||
- | 1410 | if (ret != 0) |
- | 1411 | return ret; |
- | 1412 | ||
- | 1413 | sw_context->dx_query_mob = vmw_bo; |
- | 1414 | sw_context->dx_query_ctx = sw_context->dx_ctx_node->res; |
- | 1415 | ||
- | 1416 | vmw_dmabuf_unreference(&vmw_bo); |
- | 1417 | ||
- | 1418 | return ret; |
- | 1419 | } |
- | 1420 | ||
- | 1421 | ||
983 | 1422 | ||
984 | /** |
1423 | /** |
985 | * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. |
1424 | * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. |
986 | * |
1425 | * |
987 | * @dev_priv: Pointer to a device private struct. |
1426 | * @dev_priv: Pointer to a device private struct. |
988 | * @sw_context: The software context used for this command submission. |
1427 | * @sw_context: The software context used for this command submission. |
989 | * @header: Pointer to the command header in the command stream. |
1428 | * @header: Pointer to the command header in the command stream. |
990 | */ |
1429 | */ |
991 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, |
1430 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, |
992 | struct vmw_sw_context *sw_context, |
1431 | struct vmw_sw_context *sw_context, |
993 | SVGA3dCmdHeader *header) |
1432 | SVGA3dCmdHeader *header) |
994 | { |
1433 | { |
995 | struct vmw_begin_gb_query_cmd { |
1434 | struct vmw_begin_gb_query_cmd { |
996 | SVGA3dCmdHeader header; |
1435 | SVGA3dCmdHeader header; |
997 | SVGA3dCmdBeginGBQuery q; |
1436 | SVGA3dCmdBeginGBQuery q; |
998 | } *cmd; |
1437 | } *cmd; |
999 | 1438 | ||
1000 | cmd = container_of(header, struct vmw_begin_gb_query_cmd, |
1439 | cmd = container_of(header, struct vmw_begin_gb_query_cmd, |
1001 | header); |
1440 | header); |
1002 | 1441 | ||
1003 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1442 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1004 | user_context_converter, &cmd->q.cid, |
1443 | user_context_converter, &cmd->q.cid, |
1005 | NULL); |
1444 | NULL); |
1006 | } |
1445 | } |
1007 | 1446 | ||
1008 | /** |
1447 | /** |
1009 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. |
1448 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. |
1010 | * |
1449 | * |
1011 | * @dev_priv: Pointer to a device private struct. |
1450 | * @dev_priv: Pointer to a device private struct. |
1012 | * @sw_context: The software context used for this command submission. |
1451 | * @sw_context: The software context used for this command submission. |
1013 | * @header: Pointer to the command header in the command stream. |
1452 | * @header: Pointer to the command header in the command stream. |
1014 | */ |
1453 | */ |
1015 | static int vmw_cmd_begin_query(struct vmw_private *dev_priv, |
1454 | static int vmw_cmd_begin_query(struct vmw_private *dev_priv, |
1016 | struct vmw_sw_context *sw_context, |
1455 | struct vmw_sw_context *sw_context, |
1017 | SVGA3dCmdHeader *header) |
1456 | SVGA3dCmdHeader *header) |
1018 | { |
1457 | { |
1019 | struct vmw_begin_query_cmd { |
1458 | struct vmw_begin_query_cmd { |
1020 | SVGA3dCmdHeader header; |
1459 | SVGA3dCmdHeader header; |
1021 | SVGA3dCmdBeginQuery q; |
1460 | SVGA3dCmdBeginQuery q; |
1022 | } *cmd; |
1461 | } *cmd; |
1023 | 1462 | ||
1024 | cmd = container_of(header, struct vmw_begin_query_cmd, |
1463 | cmd = container_of(header, struct vmw_begin_query_cmd, |
1025 | header); |
1464 | header); |
1026 | 1465 | ||
1027 | if (unlikely(dev_priv->has_mob)) { |
1466 | if (unlikely(dev_priv->has_mob)) { |
1028 | struct { |
1467 | struct { |
1029 | SVGA3dCmdHeader header; |
1468 | SVGA3dCmdHeader header; |
1030 | SVGA3dCmdBeginGBQuery q; |
1469 | SVGA3dCmdBeginGBQuery q; |
1031 | } gb_cmd; |
1470 | } gb_cmd; |
1032 | 1471 | ||
1033 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1472 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1034 | 1473 | ||
1035 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; |
1474 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; |
1036 | gb_cmd.header.size = cmd->header.size; |
1475 | gb_cmd.header.size = cmd->header.size; |
1037 | gb_cmd.q.cid = cmd->q.cid; |
1476 | gb_cmd.q.cid = cmd->q.cid; |
1038 | gb_cmd.q.type = cmd->q.type; |
1477 | gb_cmd.q.type = cmd->q.type; |
1039 | 1478 | ||
1040 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1479 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1041 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); |
1480 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); |
1042 | } |
1481 | } |
1043 | 1482 | ||
1044 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1483 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1045 | user_context_converter, &cmd->q.cid, |
1484 | user_context_converter, &cmd->q.cid, |
1046 | NULL); |
1485 | NULL); |
1047 | } |
1486 | } |
1048 | 1487 | ||
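vmw_cmd_begin_query shows the promotion trick this file uses for several legacy commands on MOB-capable hardware: build the guest-backed variant in a local struct of identical size, memcpy it over the original command in the stream, and re-run the guest-backed verifier. A compact stand-alone illustration (struct layouts and opcode values are invented, but kept the same size on purpose):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr      { uint32_t id, size; };
struct legacy_q { struct hdr header; uint32_t cid, type; };
struct gb_q     { struct hdr header; uint32_t cid, type; };

#define CMD_BEGIN_QUERY		0x10	/* invented opcode values */
#define CMD_BEGIN_GB_QUERY	0x40

static void promote(struct legacy_q *cmd)
{
	struct gb_q gb_cmd;

	/* The in-place rewrite is only safe because both layouts have
	 * exactly the same size; hence the BUG_ON in the driver. */
	assert(sizeof(gb_cmd) == sizeof(*cmd));

	gb_cmd.header.id = CMD_BEGIN_GB_QUERY;
	gb_cmd.header.size = cmd->header.size;
	gb_cmd.cid = cmd->cid;
	gb_cmd.type = cmd->type;

	memcpy(cmd, &gb_cmd, sizeof(*cmd));	/* overwrite in the stream */
}

int main(void)
{
	struct legacy_q cmd = { { CMD_BEGIN_QUERY, 8 }, 3, 1 };

	promote(&cmd);		/* the GB verifier would run next */
	printf("command id is now 0x%x\n", (unsigned)cmd.header.id);
	return 0;
}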
1049 | /** |
1488 | /** |
1050 | * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. |
1489 | * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. |
1051 | * |
1490 | * |
1052 | * @dev_priv: Pointer to a device private struct. |
1491 | * @dev_priv: Pointer to a device private struct. |
1053 | * @sw_context: The software context used for this command submission. |
1492 | * @sw_context: The software context used for this command submission. |
1054 | * @header: Pointer to the command header in the command stream. |
1493 | * @header: Pointer to the command header in the command stream. |
1055 | */ |
1494 | */ |
1056 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, |
1495 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, |
1057 | struct vmw_sw_context *sw_context, |
1496 | struct vmw_sw_context *sw_context, |
1058 | SVGA3dCmdHeader *header) |
1497 | SVGA3dCmdHeader *header) |
1059 | { |
1498 | { |
1060 | struct vmw_dma_buffer *vmw_bo; |
1499 | struct vmw_dma_buffer *vmw_bo; |
1061 | struct vmw_query_cmd { |
1500 | struct vmw_query_cmd { |
1062 | SVGA3dCmdHeader header; |
1501 | SVGA3dCmdHeader header; |
1063 | SVGA3dCmdEndGBQuery q; |
1502 | SVGA3dCmdEndGBQuery q; |
1064 | } *cmd; |
1503 | } *cmd; |
1065 | int ret; |
1504 | int ret; |
1066 | 1505 | ||
1067 | cmd = container_of(header, struct vmw_query_cmd, header); |
1506 | cmd = container_of(header, struct vmw_query_cmd, header); |
1068 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1507 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1069 | if (unlikely(ret != 0)) |
1508 | if (unlikely(ret != 0)) |
1070 | return ret; |
1509 | return ret; |
1071 | 1510 | ||
1072 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
1511 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
1073 | &cmd->q.mobid, |
1512 | &cmd->q.mobid, |
1074 | &vmw_bo); |
1513 | &vmw_bo); |
1075 | if (unlikely(ret != 0)) |
1514 | if (unlikely(ret != 0)) |
1076 | return ret; |
1515 | return ret; |
1077 | 1516 | ||
1078 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); |
1517 | ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); |
1079 | 1518 | ||
1080 | vmw_dmabuf_unreference(&vmw_bo); |
1519 | vmw_dmabuf_unreference(&vmw_bo); |
1081 | return ret; |
1520 | return ret; |
1082 | } |
1521 | } |
1083 | 1522 | ||
1084 | /** |
1523 | /** |
1085 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. |
1524 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. |
1086 | * |
1525 | * |
1087 | * @dev_priv: Pointer to a device private struct. |
1526 | * @dev_priv: Pointer to a device private struct. |
1088 | * @sw_context: The software context used for this command submission. |
1527 | * @sw_context: The software context used for this command submission. |
1089 | * @header: Pointer to the command header in the command stream. |
1528 | * @header: Pointer to the command header in the command stream. |
1090 | */ |
1529 | */ |
1091 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, |
1530 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, |
1092 | struct vmw_sw_context *sw_context, |
1531 | struct vmw_sw_context *sw_context, |
1093 | SVGA3dCmdHeader *header) |
1532 | SVGA3dCmdHeader *header) |
1094 | { |
1533 | { |
1095 | struct vmw_dma_buffer *vmw_bo; |
1534 | struct vmw_dma_buffer *vmw_bo; |
1096 | struct vmw_query_cmd { |
1535 | struct vmw_query_cmd { |
1097 | SVGA3dCmdHeader header; |
1536 | SVGA3dCmdHeader header; |
1098 | SVGA3dCmdEndQuery q; |
1537 | SVGA3dCmdEndQuery q; |
1099 | } *cmd; |
1538 | } *cmd; |
1100 | int ret; |
1539 | int ret; |
1101 | 1540 | ||
1102 | cmd = container_of(header, struct vmw_query_cmd, header); |
1541 | cmd = container_of(header, struct vmw_query_cmd, header); |
1103 | if (dev_priv->has_mob) { |
1542 | if (dev_priv->has_mob) { |
1104 | struct { |
1543 | struct { |
1105 | SVGA3dCmdHeader header; |
1544 | SVGA3dCmdHeader header; |
1106 | SVGA3dCmdEndGBQuery q; |
1545 | SVGA3dCmdEndGBQuery q; |
1107 | } gb_cmd; |
1546 | } gb_cmd; |
1108 | 1547 | ||
1109 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1548 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1110 | 1549 | ||
1111 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; |
1550 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; |
1112 | gb_cmd.header.size = cmd->header.size; |
1551 | gb_cmd.header.size = cmd->header.size; |
1113 | gb_cmd.q.cid = cmd->q.cid; |
1552 | gb_cmd.q.cid = cmd->q.cid; |
1114 | gb_cmd.q.type = cmd->q.type; |
1553 | gb_cmd.q.type = cmd->q.type; |
1115 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
1554 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
1116 | gb_cmd.q.offset = cmd->q.guestResult.offset; |
1555 | gb_cmd.q.offset = cmd->q.guestResult.offset; |
1117 | 1556 | ||
1118 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1557 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1119 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); |
1558 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); |
1120 | } |
1559 | } |
1121 | 1560 | ||
1122 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1561 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1123 | if (unlikely(ret != 0)) |
1562 | if (unlikely(ret != 0)) |
1124 | return ret; |
1563 | return ret; |
1125 | 1564 | ||
1126 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1565 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1127 | &cmd->q.guestResult, |
1566 | &cmd->q.guestResult, |
1128 | &vmw_bo); |
1567 | &vmw_bo); |
1129 | if (unlikely(ret != 0)) |
1568 | if (unlikely(ret != 0)) |
1130 | return ret; |
1569 | return ret; |
1131 | 1570 | ||
1132 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); |
1571 | ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); |
1133 | 1572 | ||
1134 | vmw_dmabuf_unreference(&vmw_bo); |
1573 | vmw_dmabuf_unreference(&vmw_bo); |
1135 | return ret; |
1574 | return ret; |
1136 | } |
1575 | } |
1137 | 1576 | ||
1138 | /** |
1577 | /** |
1139 | * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. |
1578 | * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. |
1140 | * |
1579 | * |
1141 | * @dev_priv: Pointer to a device private struct. |
1580 | * @dev_priv: Pointer to a device private struct. |
1142 | * @sw_context: The software context used for this command submission. |
1581 | * @sw_context: The software context used for this command submission. |
1143 | * @header: Pointer to the command header in the command stream. |
1582 | * @header: Pointer to the command header in the command stream. |
1144 | */ |
1583 | */ |
1145 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, |
1584 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, |
1146 | struct vmw_sw_context *sw_context, |
1585 | struct vmw_sw_context *sw_context, |
1147 | SVGA3dCmdHeader *header) |
1586 | SVGA3dCmdHeader *header) |
1148 | { |
1587 | { |
1149 | struct vmw_dma_buffer *vmw_bo; |
1588 | struct vmw_dma_buffer *vmw_bo; |
1150 | struct vmw_query_cmd { |
1589 | struct vmw_query_cmd { |
1151 | SVGA3dCmdHeader header; |
1590 | SVGA3dCmdHeader header; |
1152 | SVGA3dCmdWaitForGBQuery q; |
1591 | SVGA3dCmdWaitForGBQuery q; |
1153 | } *cmd; |
1592 | } *cmd; |
1154 | int ret; |
1593 | int ret; |
1155 | 1594 | ||
1156 | cmd = container_of(header, struct vmw_query_cmd, header); |
1595 | cmd = container_of(header, struct vmw_query_cmd, header); |
1157 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1596 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1158 | if (unlikely(ret != 0)) |
1597 | if (unlikely(ret != 0)) |
1159 | return ret; |
1598 | return ret; |
1160 | 1599 | ||
1161 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
1600 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, |
1162 | &cmd->q.mobid, |
1601 | &cmd->q.mobid, |
1163 | &vmw_bo); |
1602 | &vmw_bo); |
1164 | if (unlikely(ret != 0)) |
1603 | if (unlikely(ret != 0)) |
1165 | return ret; |
1604 | return ret; |
1166 | 1605 | ||
1167 | vmw_dmabuf_unreference(&vmw_bo); |
1606 | vmw_dmabuf_unreference(&vmw_bo); |
1168 | return 0; |
1607 | return 0; |
1169 | } |
1608 | } |
1170 | 1609 | ||
1171 | /** |
1610 | /** |
1172 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. |
1611 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. |
1173 | * |
1612 | * |
1174 | * @dev_priv: Pointer to a device private struct. |
1613 | * @dev_priv: Pointer to a device private struct. |
1175 | * @sw_context: The software context used for this command submission. |
1614 | * @sw_context: The software context used for this command submission. |
1176 | * @header: Pointer to the command header in the command stream. |
1615 | * @header: Pointer to the command header in the command stream. |
1177 | */ |
1616 | */ |
1178 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
1617 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
1179 | struct vmw_sw_context *sw_context, |
1618 | struct vmw_sw_context *sw_context, |
1180 | SVGA3dCmdHeader *header) |
1619 | SVGA3dCmdHeader *header) |
1181 | { |
1620 | { |
1182 | struct vmw_dma_buffer *vmw_bo; |
1621 | struct vmw_dma_buffer *vmw_bo; |
1183 | struct vmw_query_cmd { |
1622 | struct vmw_query_cmd { |
1184 | SVGA3dCmdHeader header; |
1623 | SVGA3dCmdHeader header; |
1185 | SVGA3dCmdWaitForQuery q; |
1624 | SVGA3dCmdWaitForQuery q; |
1186 | } *cmd; |
1625 | } *cmd; |
1187 | int ret; |
1626 | int ret; |
1188 | 1627 | ||
1189 | cmd = container_of(header, struct vmw_query_cmd, header); |
1628 | cmd = container_of(header, struct vmw_query_cmd, header); |
1190 | if (dev_priv->has_mob) { |
1629 | if (dev_priv->has_mob) { |
1191 | struct { |
1630 | struct { |
1192 | SVGA3dCmdHeader header; |
1631 | SVGA3dCmdHeader header; |
1193 | SVGA3dCmdWaitForGBQuery q; |
1632 | SVGA3dCmdWaitForGBQuery q; |
1194 | } gb_cmd; |
1633 | } gb_cmd; |
1195 | 1634 | ||
1196 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1635 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); |
1197 | 1636 | ||
1198 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; |
1637 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; |
1199 | gb_cmd.header.size = cmd->header.size; |
1638 | gb_cmd.header.size = cmd->header.size; |
1200 | gb_cmd.q.cid = cmd->q.cid; |
1639 | gb_cmd.q.cid = cmd->q.cid; |
1201 | gb_cmd.q.type = cmd->q.type; |
1640 | gb_cmd.q.type = cmd->q.type; |
1202 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
1641 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; |
1203 | gb_cmd.q.offset = cmd->q.guestResult.offset; |
1642 | gb_cmd.q.offset = cmd->q.guestResult.offset; |
1204 | 1643 | ||
1205 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1644 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); |
1206 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); |
1645 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); |
1207 | } |
1646 | } |
1208 | 1647 | ||
1209 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1648 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1210 | if (unlikely(ret != 0)) |
1649 | if (unlikely(ret != 0)) |
1211 | return ret; |
1650 | return ret; |
1212 | 1651 | ||
1213 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1652 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1214 | &cmd->q.guestResult, |
1653 | &cmd->q.guestResult, |
1215 | &vmw_bo); |
1654 | &vmw_bo); |
1216 | if (unlikely(ret != 0)) |
1655 | if (unlikely(ret != 0)) |
1217 | return ret; |
1656 | return ret; |
1218 | 1657 | ||
1219 | vmw_dmabuf_unreference(&vmw_bo); |
1658 | vmw_dmabuf_unreference(&vmw_bo); |
1220 | return 0; |
1659 | return 0; |
1221 | } |
1660 | } |
1222 | 1661 | ||
1223 | static int vmw_cmd_dma(struct vmw_private *dev_priv, |
1662 | static int vmw_cmd_dma(struct vmw_private *dev_priv, |
1224 | struct vmw_sw_context *sw_context, |
1663 | struct vmw_sw_context *sw_context, |
1225 | SVGA3dCmdHeader *header) |
1664 | SVGA3dCmdHeader *header) |
1226 | { |
1665 | { |
1227 | struct vmw_dma_buffer *vmw_bo = NULL; |
1666 | struct vmw_dma_buffer *vmw_bo = NULL; |
1228 | struct vmw_surface *srf = NULL; |
1667 | struct vmw_surface *srf = NULL; |
1229 | struct vmw_dma_cmd { |
1668 | struct vmw_dma_cmd { |
1230 | SVGA3dCmdHeader header; |
1669 | SVGA3dCmdHeader header; |
1231 | SVGA3dCmdSurfaceDMA dma; |
1670 | SVGA3dCmdSurfaceDMA dma; |
1232 | } *cmd; |
1671 | } *cmd; |
1233 | int ret; |
1672 | int ret; |
1234 | SVGA3dCmdSurfaceDMASuffix *suffix; |
1673 | SVGA3dCmdSurfaceDMASuffix *suffix; |
1235 | uint32_t bo_size; |
1674 | uint32_t bo_size; |
1236 | 1675 | ||
1237 | cmd = container_of(header, struct vmw_dma_cmd, header); |
1676 | cmd = container_of(header, struct vmw_dma_cmd, header); |
1238 | suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + |
1677 | suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + |
1239 | header->size - sizeof(*suffix)); |
1678 | header->size - sizeof(*suffix)); |
1240 | 1679 | ||
1241 | /* Make sure device and verifier stay in sync. */ |
1680 | /* Make sure device and verifier stay in sync. */ |
1242 | if (unlikely(suffix->suffixSize != sizeof(*suffix))) { |
1681 | if (unlikely(suffix->suffixSize != sizeof(*suffix))) { |
1243 | DRM_ERROR("Invalid DMA suffix size.\n"); |
1682 | DRM_ERROR("Invalid DMA suffix size.\n"); |
1244 | return -EINVAL; |
1683 | return -EINVAL; |
1245 | } |
1684 | } |
1246 | 1685 | ||
1247 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1686 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1248 | &cmd->dma.guest.ptr, |
1687 | &cmd->dma.guest.ptr, |
1249 | &vmw_bo); |
1688 | &vmw_bo); |
1250 | if (unlikely(ret != 0)) |
1689 | if (unlikely(ret != 0)) |
1251 | return ret; |
1690 | return ret; |
1252 | 1691 | ||
1253 | /* Make sure DMA doesn't cross BO boundaries. */ |
1692 | /* Make sure DMA doesn't cross BO boundaries. */ |
1254 | bo_size = vmw_bo->base.num_pages * PAGE_SIZE; |
1693 | bo_size = vmw_bo->base.num_pages * PAGE_SIZE; |
1255 | if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { |
1694 | if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { |
1256 | DRM_ERROR("Invalid DMA offset.\n"); |
1695 | DRM_ERROR("Invalid DMA offset.\n"); |
1257 | return -EINVAL; |
1696 | return -EINVAL; |
1258 | } |
1697 | } |
1259 | 1698 | ||
1260 | bo_size -= cmd->dma.guest.ptr.offset; |
1699 | bo_size -= cmd->dma.guest.ptr.offset; |
1261 | if (unlikely(suffix->maximumOffset > bo_size)) |
1700 | if (unlikely(suffix->maximumOffset > bo_size)) |
1262 | suffix->maximumOffset = bo_size; |
1701 | suffix->maximumOffset = bo_size; |
1263 | 1702 | ||
1264 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1703 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1265 | user_surface_converter, &cmd->dma.host.sid, |
1704 | user_surface_converter, &cmd->dma.host.sid, |
1266 | NULL); |
1705 | NULL); |
1267 | if (unlikely(ret != 0)) { |
1706 | if (unlikely(ret != 0)) { |
1268 | if (unlikely(ret != -ERESTARTSYS)) |
1707 | if (unlikely(ret != -ERESTARTSYS)) |
1269 | DRM_ERROR("could not find surface for DMA.\n"); |
1708 | DRM_ERROR("could not find surface for DMA.\n"); |
1270 | goto out_no_surface; |
1709 | goto out_no_surface; |
1271 | } |
1710 | } |
1272 | 1711 | ||
1273 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
1712 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
1274 | 1713 | ||
1275 | // vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); |
1714 | // vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); |
1276 | 1715 | ||
1277 | out_no_surface: |
1716 | out_no_surface: |
1278 | vmw_dmabuf_unreference(&vmw_bo); |
1717 | vmw_dmabuf_unreference(&vmw_bo); |
1279 | return ret; |
1718 | return ret; |
1280 | } |
1719 | } |
1281 | 1720 | ||
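The interesting part of vmw_cmd_dma is the bounds handling: an offset beyond the end of the buffer is rejected outright, while a too-large maximumOffset in the suffix is silently clamped so the transfer can never run past the buffer. The arithmetic in isolation (a user-space sketch with illustrative names):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Returns -1 for an offset past the end of the buffer; otherwise clamps
 * *max_offset so the transfer cannot cross the buffer boundary. */
static int clamp_dma(uint32_t num_pages, uint32_t offset, uint32_t *max_offset)
{
	uint32_t bo_size = num_pages * PAGE_SIZE;

	if (offset > bo_size)
		return -1;		/* "Invalid DMA offset." */

	bo_size -= offset;
	if (*max_offset > bo_size)
		*max_offset = bo_size;	/* silent clamp, as in the driver */
	return 0;
}

int main(void)
{
	uint32_t max = 100000;

	if (clamp_dma(4, 8192, &max) == 0)	/* 4 pages = 16384 bytes */
		printf("clamped maximumOffset = %u\n", (unsigned)max); /* 8192 */
	return 0;
}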
1282 | static int vmw_cmd_draw(struct vmw_private *dev_priv, |
1721 | static int vmw_cmd_draw(struct vmw_private *dev_priv, |
1283 | struct vmw_sw_context *sw_context, |
1722 | struct vmw_sw_context *sw_context, |
1284 | SVGA3dCmdHeader *header) |
1723 | SVGA3dCmdHeader *header) |
1285 | { |
1724 | { |
1286 | struct vmw_draw_cmd { |
1725 | struct vmw_draw_cmd { |
1287 | SVGA3dCmdHeader header; |
1726 | SVGA3dCmdHeader header; |
1288 | SVGA3dCmdDrawPrimitives body; |
1727 | SVGA3dCmdDrawPrimitives body; |
1289 | } *cmd; |
1728 | } *cmd; |
1290 | SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( |
1729 | SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( |
1291 | (unsigned long)header + sizeof(*cmd)); |
1730 | (unsigned long)header + sizeof(*cmd)); |
1292 | SVGA3dPrimitiveRange *range; |
1731 | SVGA3dPrimitiveRange *range; |
1293 | uint32_t i; |
1732 | uint32_t i; |
1294 | uint32_t maxnum; |
1733 | uint32_t maxnum; |
1295 | int ret; |
1734 | int ret; |
1296 | 1735 | ||
1297 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1736 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
1298 | if (unlikely(ret != 0)) |
1737 | if (unlikely(ret != 0)) |
1299 | return ret; |
1738 | return ret; |
1300 | 1739 | ||
1301 | cmd = container_of(header, struct vmw_draw_cmd, header); |
1740 | cmd = container_of(header, struct vmw_draw_cmd, header); |
1302 | maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); |
1741 | maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); |
1303 | 1742 | ||
1304 | if (unlikely(cmd->body.numVertexDecls > maxnum)) { |
1743 | if (unlikely(cmd->body.numVertexDecls > maxnum)) { |
1305 | DRM_ERROR("Illegal number of vertex declarations.\n"); |
1744 | DRM_ERROR("Illegal number of vertex declarations.\n"); |
1306 | return -EINVAL; |
1745 | return -EINVAL; |
1307 | } |
1746 | } |
1308 | 1747 | ||
1309 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { |
1748 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { |
1310 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1749 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1311 | user_surface_converter, |
1750 | user_surface_converter, |
1312 | &decl->array.surfaceId, NULL); |
1751 | &decl->array.surfaceId, NULL); |
1313 | if (unlikely(ret != 0)) |
1752 | if (unlikely(ret != 0)) |
1314 | return ret; |
1753 | return ret; |
1315 | } |
1754 | } |
1316 | 1755 | ||
1317 | maxnum = (header->size - sizeof(cmd->body) - |
1756 | maxnum = (header->size - sizeof(cmd->body) - |
1318 | cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); |
1757 | cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); |
1319 | if (unlikely(cmd->body.numRanges > maxnum)) { |
1758 | if (unlikely(cmd->body.numRanges > maxnum)) { |
1320 | DRM_ERROR("Illegal number of index ranges.\n"); |
1759 | DRM_ERROR("Illegal number of index ranges.\n"); |
1321 | return -EINVAL; |
1760 | return -EINVAL; |
1322 | } |
1761 | } |
1323 | 1762 | ||
1324 | range = (SVGA3dPrimitiveRange *) decl; |
1763 | range = (SVGA3dPrimitiveRange *) decl; |
1325 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { |
1764 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { |
1326 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1765 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1327 | user_surface_converter, |
1766 | user_surface_converter, |
1328 | &range->indexArray.surfaceId, NULL); |
1767 | &range->indexArray.surfaceId, NULL); |
1329 | if (unlikely(ret != 0)) |
1768 | if (unlikely(ret != 0)) |
1330 | return ret; |
1769 | return ret; |
1331 | } |
1770 | } |
1332 | return 0; |
1771 | return 0; |
1333 | } |
1772 | } |
1334 | 1773 | ||
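vmw_cmd_draw validates a variable-length payload: numVertexDecls declarations followed by numRanges ranges, with both counts supplied by user space. Rather than trusting the counts, it derives the maximum that could possibly fit from the header size and rejects anything larger, which keeps the two walk loops in bounds. A distilled version of that check (struct sizes invented):

#include <stdint.h>
#include <stdio.h>

struct decl  { uint32_t surface_id; uint32_t stride; };	/* invented layouts */
struct range { uint32_t index_surface_id; uint32_t count; };

/* payload_size comes from the command header; the counts come from
 * user space and must not be trusted. */
static int check_draw(uint32_t payload_size, uint32_t num_decls,
		      uint32_t num_ranges)
{
	uint32_t maxnum = payload_size / sizeof(struct decl);

	if (num_decls > maxnum)
		return -1;	/* "Illegal number of vertex declarations." */

	maxnum = (payload_size - num_decls * sizeof(struct decl)) /
		 sizeof(struct range);
	if (num_ranges > maxnum)
		return -1;	/* "Illegal number of index ranges." */

	return 0;
}

int main(void)
{
	printf("%d\n", check_draw(32, 2, 2));	/* fits -> 0 */
	printf("%d\n", check_draw(32, 2, 3));	/* too many ranges -> -1 */
	return 0;
}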
1335 | 1774 | ||
1336 | static int vmw_cmd_tex_state(struct vmw_private *dev_priv, |
1775 | static int vmw_cmd_tex_state(struct vmw_private *dev_priv, |
1337 | struct vmw_sw_context *sw_context, |
1776 | struct vmw_sw_context *sw_context, |
1338 | SVGA3dCmdHeader *header) |
1777 | SVGA3dCmdHeader *header) |
1339 | { |
1778 | { |
1340 | struct vmw_tex_state_cmd { |
1779 | struct vmw_tex_state_cmd { |
1341 | SVGA3dCmdHeader header; |
1780 | SVGA3dCmdHeader header; |
1342 | SVGA3dCmdSetTextureState state; |
1781 | SVGA3dCmdSetTextureState state; |
1343 | } *cmd; |
1782 | } *cmd; |
1344 | 1783 | ||
1345 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
1784 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
1346 | ((unsigned long) header + header->size + sizeof(header)); |
1785 | ((unsigned long) header + header->size + sizeof(header)); |
1347 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
1786 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
1348 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); |
1787 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); |
1349 | struct vmw_resource_val_node *ctx_node; |
1788 | struct vmw_resource_val_node *ctx_node; |
1350 | struct vmw_resource_val_node *res_node; |
1789 | struct vmw_resource_val_node *res_node; |
1351 | int ret; |
1790 | int ret; |
1352 | 1791 | ||
1353 | cmd = container_of(header, struct vmw_tex_state_cmd, |
1792 | cmd = container_of(header, struct vmw_tex_state_cmd, |
1354 | header); |
1793 | header); |
1355 | 1794 | ||
1356 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1795 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1357 | user_context_converter, &cmd->state.cid, |
1796 | user_context_converter, &cmd->state.cid, |
1358 | &ctx_node); |
1797 | &ctx_node); |
1359 | if (unlikely(ret != 0)) |
1798 | if (unlikely(ret != 0)) |
1360 | return ret; |
1799 | return ret; |
1361 | 1800 | ||
1362 | for (; cur_state < last_state; ++cur_state) { |
1801 | for (; cur_state < last_state; ++cur_state) { |
1363 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) |
1802 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) |
1364 | continue; |
1803 | continue; |
- | 1804 | ||
- | 1805 | if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) { |
- | 1806 | DRM_ERROR("Illegal texture/sampler unit %u.\n", |
- | 1807 | (unsigned) cur_state->stage); |
- | 1808 | return -EINVAL; |
- | 1809 | } |
1365 | 1810 | ||
1366 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1811 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1367 | user_surface_converter, |
1812 | user_surface_converter, |
1368 | &cur_state->value, &res_node); |
1813 | &cur_state->value, &res_node); |
1369 | if (unlikely(ret != 0)) |
1814 | if (unlikely(ret != 0)) |
1370 | return ret; |
1815 | return ret; |
1371 | 1816 | ||
1372 | if (dev_priv->has_mob) { |
1817 | if (dev_priv->has_mob) { |
1373 | struct vmw_ctx_bindinfo bi; |
1818 | struct vmw_ctx_bindinfo_tex binding; |
1374 | 1819 | ||
1375 | bi.ctx = ctx_node->res; |
1820 | binding.bi.ctx = ctx_node->res; |
1376 | bi.res = res_node ? res_node->res : NULL; |
1821 | binding.bi.res = res_node ? res_node->res : NULL; |
1377 | bi.bt = vmw_ctx_binding_tex; |
1822 | binding.bi.bt = vmw_ctx_binding_tex; |
1378 | bi.i1.texture_stage = cur_state->stage; |
1823 | binding.texture_stage = cur_state->stage; |
1379 | vmw_context_binding_add(ctx_node->staged_bindings, |
1824 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, |
1380 | &bi); |
1825 | 0, binding.texture_stage); |
1381 | } |
1826 | } |
1382 | } |
1827 | } |
1383 | 1828 | ||
1384 | return 0; |
1829 | return 0; |
1385 | } |
1830 | } |

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

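	/*
	 * Editor's note: a resource seen here for the first time in the
	 * batch is about to receive a completely new backup buffer, so
	 * there is no need to supply or validate backing for its old
	 * contents.
	 */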
	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

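	/*
	 * Editor's note (hedged): registering a relocation with a NULL
	 * resource appears to be how the legacy define is neutralized;
	 * when relocations are applied, a NULL entry rewrites the command
	 * id to a NOP, since the shader body has already been copied into
	 * the context's compat shader manager above.
	 */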
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

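	/*
	 * Editor's note: the shader may have been defined inline with
	 * SVGA_3D_CMD_SHADER_DEFINE, in which case it lives in the
	 * context's compat shader manager. Try that first and fall back
	 * to a real user-space shader resource below.
	 */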
	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

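	/*
	 * Editor's note: for guest-backed devices the legacy command id is
	 * patched in place, so the device receives
	 * SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE instead of the original.
	 */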
	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

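	/*
	 * Editor's note (hedged): vmw_binding_add() only records the
	 * binding in the context's staged tracking state; the device
	 * bindings are committed later, which also allows them to be
	 * re-emitted after the context has been swapped out and back in.
	 */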
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

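	/*
	 * Editor's note: the view count is derived from the command length
	 * itself, and the range check below is done in u64 so that
	 * startView + num_sr_view cannot wrap in 32 bits.
	 */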
	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

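	/*
	 * Editor's note: SVGA3D_INVALID_ID leaves res NULL, which unbinds
	 * the shader stage; any other id must name a shader already known
	 * to the DX shader manager.
	 */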
	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

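	/*
	 * Editor's note: each trailing SVGA3dVertexBuffer entry is
	 * validated as a surface, and its binding slot is startBuffer + i,
	 * so partial updates of the vertex-buffer range are tracked per
	 * slot.
	 */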
	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

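	/*
	 * Editor's note (hedged): vmw_cotable_notify() appears to make
	 * sure the context's cotable for this view type is large enough
	 * to hold an entry for cmd->defined_id before the define command
	 * reaches the device.
	 */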
	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

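	/*
	 * Editor's note: the BUILD_BUG_ONs above pin the surface id at the
	 * same offset in all three command bodies, which is what lets a
	 * single validator handle readback, invalidate and update
	 * subresource commands.
	 */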
	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

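/**
 * vmw_cmd_dx_cid_check - Validate a command that only requires a DX
 * context to be set (editor's note: kernel-doc added; wording inferred
 * from the function body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */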
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, make sure it's validated (present in the device) so that
 * the remove command will not confuse the device.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * Add view to the validate list iff it was not created using this
	 * command batch.
	 */
	return vmw_view_res_val_add(sw_context, view);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
			  cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

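/*
 * Editor's sketch: the size computation above assumes the legacy (non-3D)
 * packet layout, a bare 32-bit command id immediately followed by a
 * fixed-size argument block and no SVGA3dCmdHeader:
 */
#if 0
struct {
	uint32_t cmd_id;	/* e.g. SVGA_CMD_UPDATE */
	SVGAFifoCmdUpdate body;	/* fixed-size arguments */
} packet;
/* hence: *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate) */
#endif
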
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
//		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
//	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
//		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true)
};

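/*
 * Editor's note (illustrative): the three booleans in each VMW_CMD_DEF()
 * entry are consumed by vmw_cmd_check() below as, in order, user_allow
 * (the command may come from user space), gb_disable (rejected when
 * guest-backed objects are active) and gb_enable (requires guest-backed
 * support). So an entry such as:
 */
#if 0
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
	    true, false, true),
#endif
/* reads: user-submittable, and valid only on guest-backed hardware. */
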
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

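/*
 * Editor's sketch of the decode arithmetic used above: header->size counts
 * only the body bytes of a 3D packet, and command ids are rebased against
 * SVGA_3D_CMD_BASE before indexing the dispatch table.
 */
#if 0
SVGA3dCmdHeader *hdr = buf;
uint32_t bytes = hdr->size + sizeof(SVGA3dCmdHeader); /* stream advance */
uint32_t slot = hdr->id - SVGA_3D_CMD_BASE;	/* vmw_cmd_entries[] index */
#endif
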
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

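/*
 * Worked example (editor's illustration): for a 48-byte batch made up of
 * two packets that vmw_cmd_check() measures at 24 bytes each, the loop
 * above advances buf by 24 twice and cur_size ends at exactly 0; any
 * mismatch trips the "Command verifier out of sync" error instead.
 */
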
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

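/*
 * Editor's sketch: a relocation records a patch site inside the already
 * copied command stream; once validation has placed the buffer, applying
 * it is a single store. The pointer type below is an assumption for
 * illustration.
 */
#if 0
SVGAGuestPtr *loc = reloc->location;	/* recorded while checking commands */
/* buffer placed in a GMR: */
loc->gmrId = bo->mem.start;
/* buffer placed in VRAM: the framebuffer GMR plus a byte offset */
loc->gmrId = SVGA_GMR_FRAMEBUFFER;
loc->offset += bo->offset;
#endif
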
/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context holding the cached binding state.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */
	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

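/*
 * Editor's summary (illustrative) of the placement fallback chain above:
 *  1. vmw_mob_placement      - only when validate_as_mob is set;
 *  2. vmw_vram_gmr_placement - VRAM if there is room, otherwise a GMR id;
 *  3. vmw_vram_placement     - VRAM again, this time evicting contents.
 * -ERESTARTSYS from step 2 is returned as-is so that a signal can abort
 * an interruptible validation instead of forcing eviction.
 */
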
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

2218 | 3480 | ||
2219 | /** |
3481 | /** |
2220 | * vmw_execbuf_fence_commands - create and submit a command stream fence |
3482 | * vmw_execbuf_fence_commands - create and submit a command stream fence |
2221 | * |
3483 | * |
2222 | * Creates a fence object and submits a command stream marker. |
3484 | * Creates a fence object and submits a command stream marker. |
2223 | * If this fails for some reason, We sync the fifo and return NULL. |
3485 | * If this fails for some reason, We sync the fifo and return NULL. |
2224 | * It is then safe to fence buffers with a NULL pointer. |
3486 | * It is then safe to fence buffers with a NULL pointer. |
2225 | * |
3487 | * |
2226 | * If @p_handle is not NULL @file_priv must also not be NULL. Creates |
3488 | * If @p_handle is not NULL @file_priv must also not be NULL. Creates |
2227 | * a userspace handle if @p_handle is not NULL, otherwise not. |
3489 | * a userspace handle if @p_handle is not NULL, otherwise not. |
2228 | */ |
3490 | */ |
2229 | 3491 | ||
2230 | int vmw_execbuf_fence_commands(struct drm_file *file_priv, |
3492 | int vmw_execbuf_fence_commands(struct drm_file *file_priv, |
2231 | struct vmw_private *dev_priv, |
3493 | struct vmw_private *dev_priv, |
2232 | struct vmw_fence_obj **p_fence, |
3494 | struct vmw_fence_obj **p_fence, |
2233 | uint32_t *p_handle) |
3495 | uint32_t *p_handle) |
2234 | { |
3496 | { |
2235 | uint32_t sequence; |
3497 | uint32_t sequence; |
2236 | int ret; |
3498 | int ret; |
2237 | bool synced = false; |
3499 | bool synced = false; |
2238 | 3500 | ||
2239 | /* p_handle implies file_priv. */ |
3501 | /* p_handle implies file_priv. */ |
2240 | BUG_ON(p_handle != NULL && file_priv == NULL); |
3502 | BUG_ON(p_handle != NULL && file_priv == NULL); |
2241 | 3503 | ||
2242 | ret = vmw_fifo_send_fence(dev_priv, &sequence); |
3504 | ret = vmw_fifo_send_fence(dev_priv, &sequence); |
2243 | if (unlikely(ret != 0)) { |
3505 | if (unlikely(ret != 0)) { |
2244 | DRM_ERROR("Fence submission error. Syncing.\n"); |
3506 | DRM_ERROR("Fence submission error. Syncing.\n"); |
2245 | synced = true; |
3507 | synced = true; |
2246 | } |
3508 | } |
2247 | 3509 | ||
2248 | if (p_handle != NULL) |
3510 | if (p_handle != NULL) |
2249 | ret = vmw_user_fence_create(file_priv, dev_priv->fman, |
3511 | ret = vmw_user_fence_create(file_priv, dev_priv->fman, |
2250 | sequence, |
- | |
2251 | DRM_VMW_FENCE_FLAG_EXEC, |
- | |
2252 | p_fence, p_handle); |
3512 | sequence, p_fence, p_handle); |
2253 | else |
3513 | else |
2254 | ret = vmw_fence_create(dev_priv->fman, sequence, |
3514 | ret = vmw_fence_create(dev_priv->fman, sequence, p_fence); |
2255 | DRM_VMW_FENCE_FLAG_EXEC, |
- | |
2256 | p_fence); |
- | |
2257 | 3515 | ||
2258 | if (unlikely(ret != 0 && !synced)) { |
3516 | if (unlikely(ret != 0 && !synced)) { |
2259 | (void) vmw_fallback_wait(dev_priv, false, false, |
3517 | (void) vmw_fallback_wait(dev_priv, false, false, |
2260 | sequence, false, |
3518 | sequence, false, |
2261 | VMW_FENCE_WAIT_TIMEOUT); |
3519 | VMW_FENCE_WAIT_TIMEOUT); |
2262 | *p_fence = NULL; |
3520 | *p_fence = NULL; |
2263 | } |
3521 | } |
2264 | 3522 | ||
2265 | return 0; |
3523 | return 0; |
2266 | } |
3524 | } |
2267 | 3525 | ||
2268 | /** |
3526 | /** |
2269 | * vmw_execbuf_copy_fence_user - copy fence object information to |
3527 | * vmw_execbuf_copy_fence_user - copy fence object information to |
2270 | * user-space. |
3528 | * user-space. |
2271 | * |
3529 | * |
2272 | * @dev_priv: Pointer to a vmw_private struct. |
3530 | * @dev_priv: Pointer to a vmw_private struct. |
2273 | * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. |
3531 | * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. |
2274 | * @ret: Return value from fence object creation. |
3532 | * @ret: Return value from fence object creation. |
2275 | * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to |
3533 | * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to |
2276 | * which the information should be copied. |
3534 | * which the information should be copied. |
2277 | * @fence: Pointer to the fenc object. |
3535 | * @fence: Pointer to the fenc object. |
2278 | * @fence_handle: User-space fence handle. |
3536 | * @fence_handle: User-space fence handle. |
2279 | * |
3537 | * |
2280 | * This function copies fence information to user-space. If copying fails, |
3538 | * This function copies fence information to user-space. If copying fails, |
2281 | * The user-space struct drm_vmw_fence_rep::error member is hopefully |
3539 | * The user-space struct drm_vmw_fence_rep::error member is hopefully |
2282 | * left untouched, and if it's preloaded with an -EFAULT by user-space, |
3540 | * left untouched, and if it's preloaded with an -EFAULT by user-space, |
2283 | * the error will hopefully be detected. |
3541 | * the error will hopefully be detected. |
2284 | * Also if copying fails, user-space will be unable to signal the fence |
3542 | * Also if copying fails, user-space will be unable to signal the fence |
2285 | * object so we wait for it immediately, and then unreference the |
3543 | * object so we wait for it immediately, and then unreference the |
2286 | * user-space reference. |
3544 | * user-space reference. |
2287 | */ |
3545 | */ |
2288 | void |
3546 | void |
2289 | vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, |
3547 | vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, |
2290 | struct vmw_fpriv *vmw_fp, |
3548 | struct vmw_fpriv *vmw_fp, |
2291 | int ret, |
3549 | int ret, |
2292 | struct drm_vmw_fence_rep __user *user_fence_rep, |
3550 | struct drm_vmw_fence_rep __user *user_fence_rep, |
2293 | struct vmw_fence_obj *fence, |
3551 | struct vmw_fence_obj *fence, |
2294 | uint32_t fence_handle) |
3552 | uint32_t fence_handle) |
2295 | { |
3553 | { |
2296 | struct drm_vmw_fence_rep fence_rep; |
3554 | struct drm_vmw_fence_rep fence_rep; |
2297 | 3555 | ||
2298 | if (user_fence_rep == NULL) |
3556 | if (user_fence_rep == NULL) |
2299 | return; |
3557 | return; |
2300 | 3558 | ||
2301 | memset(&fence_rep, 0, sizeof(fence_rep)); |
3559 | memset(&fence_rep, 0, sizeof(fence_rep)); |
2302 | 3560 | ||
2303 | fence_rep.error = ret; |
3561 | fence_rep.error = ret; |
2304 | if (ret == 0) { |
3562 | if (ret == 0) { |
2305 | BUG_ON(fence == NULL); |
3563 | BUG_ON(fence == NULL); |
2306 | 3564 | ||
2307 | fence_rep.handle = fence_handle; |
3565 | fence_rep.handle = fence_handle; |
2308 | fence_rep.seqno = fence->seqno; |
3566 | fence_rep.seqno = fence->base.seqno; |
2309 | vmw_update_seqno(dev_priv, &dev_priv->fifo); |
3567 | vmw_update_seqno(dev_priv, &dev_priv->fifo); |
2310 | fence_rep.passed_seqno = dev_priv->last_read_seqno; |
3568 | fence_rep.passed_seqno = dev_priv->last_read_seqno; |
2311 | } |
3569 | } |
2312 | 3570 | ||
2313 | /* |
3571 | /* |
2314 | * copy_to_user errors will be detected by user space not |
3572 | * copy_to_user errors will be detected by user space not |
2315 | * seeing fence_rep::error filled in. Typically |
3573 | * seeing fence_rep::error filled in. Typically |
2316 | * user-space would have pre-set that member to -EFAULT. |
3574 | * user-space would have pre-set that member to -EFAULT. |
2317 | */ |
3575 | */ |
2318 | // ret = copy_to_user(user_fence_rep, &fence_rep, |
3576 | ret = copy_to_user(user_fence_rep, &fence_rep, |
2319 | // sizeof(fence_rep)); |
3577 | sizeof(fence_rep)); |
2320 | 3578 | ||
2321 | /* |
3579 | /* |
2322 | * User-space lost the fence object. We need to sync |
3580 | * User-space lost the fence object. We need to sync |
2323 | * and unreference the handle. |
3581 | * and unreference the handle. |
2324 | */ |
3582 | */ |
2325 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { |
3583 | if (unlikely(ret != 0) && (fence_rep.error == 0)) { |
2326 | ttm_ref_object_base_unref(vmw_fp->tfile, |
3584 | ttm_ref_object_base_unref(vmw_fp->tfile, |
2327 | fence_handle, TTM_REF_USAGE); |
3585 | fence_handle, TTM_REF_USAGE); |
2328 | DRM_ERROR("Fence copy error. Syncing.\n"); |
3586 | DRM_ERROR("Fence copy error. Syncing.\n"); |
2329 | (void) vmw_fence_obj_wait(fence, fence->signal_mask, |
3587 | (void) vmw_fence_obj_wait(fence, false, false, |
2330 | false, false, |
- | |
2331 | VMW_FENCE_WAIT_TIMEOUT); |
3588 | VMW_FENCE_WAIT_TIMEOUT); |
2332 | } |
3589 | } |
2333 | } |
3590 | } |
- | 3591 | ||
- | 3592 | /** |
|
- | 3593 | * vmw_execbuf_submit_fifo - Patch a command batch and submit it using |
|
- | 3594 | * the fifo. |
|
- | 3595 | * |
|
- | 3596 | * @dev_priv: Pointer to a device private structure. |
|
- | 3597 | * @kernel_commands: Pointer to the unpatched command batch. |
|
- | 3598 | * @command_size: Size of the unpatched command batch. |
|
- | 3599 | * @sw_context: Structure holding the relocation lists. |
|
- | 3600 | * |
|
- | 3601 | * Side effects: If this function returns 0, then the command batch |
|
- | 3602 | * pointed to by @kernel_commands will have been modified. |
|
- | 3603 | */ |
|
- | 3604 | static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, |
|
- | 3605 | void *kernel_commands, |
|
- | 3606 | u32 command_size, |
|
- | 3607 | struct vmw_sw_context *sw_context) |
|
- | 3608 | { |
|
- | 3609 | void *cmd; |
|
- | 3610 | ||
- | 3611 | if (sw_context->dx_ctx_node) |
|
- | 3612 | cmd = vmw_fifo_reserve_dx(dev_priv, command_size, |
|
- | 3613 | sw_context->dx_ctx_node->res->id); |
|
- | 3614 | else |
|
- | 3615 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
|
- | 3616 | if (!cmd) { |
|
- | 3617 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
|
- | 3618 | return -ENOMEM; |
|
- | 3619 | } |
|
- | 3620 | ||
- | 3621 | vmw_apply_relocations(sw_context); |
|
- | 3622 | memcpy(cmd, kernel_commands, command_size); |
|
- | 3623 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); |
|
- | 3624 | vmw_resource_relocations_free(&sw_context->res_relocations); |
|
- | 3625 | vmw_fifo_commit(dev_priv, command_size); |
|
- | 3626 | ||
- | 3627 | return 0; |
|
- | 3628 | } |
|
- | 3629 | ||
- | 3630 | /** |
|
- | 3631 | * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using |
|
- | 3632 | * the command buffer manager. |
|
- | 3633 | * |
|
- | 3634 | * @dev_priv: Pointer to a device private structure. |
|
- | 3635 | * @header: Opaque handle to the command buffer allocation. |
|
- | 3636 | * @command_size: Size of the unpatched command batch. |
|
- | 3637 | * @sw_context: Structure holding the relocation lists. |
|
- | 3638 | * |
|
- | 3639 | * Side effects: If this function returns 0, then the command buffer |
|
- | 3640 | * represented by @header will have been modified. |
|
- | 3641 | */ |
|
- | 3642 | static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, |
|
- | 3643 | struct vmw_cmdbuf_header *header, |
|
- | 3644 | u32 command_size, |
|
- | 3645 | struct vmw_sw_context *sw_context) |
|
- | 3646 | { |
|
- | 3647 | u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id : |
|
- | 3648 | SVGA3D_INVALID_ID); |
|
- | 3649 | void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, |
|
- | 3650 | id, false, header); |
|
- | 3651 | ||
- | 3652 | vmw_apply_relocations(sw_context); |
|
- | 3653 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); |
|
- | 3654 | vmw_resource_relocations_free(&sw_context->res_relocations); |
|
- | 3655 | vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false); |
|
- | 3656 | ||
- | 3657 | return 0; |
|
- | 3658 | } |
|
- | 3659 | ||
- | 3660 | /** |
|
- | 3661 | * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for |
|
- | 3662 | * submission using a command buffer. |
|
- | 3663 | * |
|
- | 3664 | * @dev_priv: Pointer to a device private structure. |
|
- | 3665 | * @user_commands: User-space pointer to the commands to be submitted. |
|
- | 3666 | * @command_size: Size of the unpatched command batch. |
|
- | 3667 | * @header: Out parameter returning the opaque pointer to the command buffer. |
|
- | 3668 | * |
|
- | 3669 | * This function checks whether we can use the command buffer manager for |
|
- | 3670 | * submission and if so, creates a command buffer of suitable size and |
|
- | 3671 | * copies the user data into that buffer. |
|
- | 3672 | * |
|
- | 3673 | * On successful return, the function returns a pointer to the data in the |
|
- | 3674 | * command buffer and *@header is set to non-NULL. |
|
- | 3675 | * If command buffers could not be used, the function will return the value |
|
- | 3676 | * of @kernel_commands on function call. That value may be NULL. In that case, |
|
- | 3677 | * the value of *@header will be set to NULL. |
|
- | 3678 | * If an error is encountered, the function will return a pointer error value. |
|
- | 3679 | * If the function is interrupted by a signal while sleeping, it will return |
|
- | 3680 | * -ERESTARTSYS casted to a pointer error value. |
|
- | 3681 | */ |
|
- | 3682 | static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, |
|
- | 3683 | void __user *user_commands, |
|
- | 3684 | void *kernel_commands, |
|
- | 3685 | u32 command_size, |
|
- | 3686 | struct vmw_cmdbuf_header **header) |
|
- | 3687 | { |
|
- | 3688 | size_t cmdbuf_size; |
|
- | 3689 | int ret; |
|
- | 3690 | ||
- | 3691 | *header = NULL; |
|
- | 3692 | if (!dev_priv->cman || kernel_commands) |
|
- | 3693 | return kernel_commands; |
|
- | 3694 | ||
- | 3695 | if (command_size > SVGA_CB_MAX_SIZE) { |
|
- | 3696 | DRM_ERROR("Command buffer is too large.\n"); |
|
- | 3697 | return ERR_PTR(-EINVAL); |
|
- | 3698 | } |
|
- | 3699 | ||
- | 3700 | /* If possible, add a little space for fencing. */ |
|
- | 3701 | cmdbuf_size = command_size + 512; |
|
- | 3702 | cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); |
|
- | 3703 | kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, |
|
- | 3704 | true, header); |
|
- | 3705 | if (IS_ERR(kernel_commands)) |
|
- | 3706 | return kernel_commands; |
|
- | 3707 | ||
- | 3708 | ret = copy_from_user(kernel_commands, user_commands, |
|
- | 3709 | command_size); |
|
- | 3710 | if (ret) { |
|
- | 3711 | DRM_ERROR("Failed copying commands.\n"); |
|
- | 3712 | vmw_cmdbuf_header_free(*header); |
|
- | 3713 | *header = NULL; |
|
- | 3714 | return ERR_PTR(-EFAULT); |
|
- | 3715 | } |
|
- | 3716 | ||
- | 3717 | return kernel_commands; |
|
- | 3718 | } |
|
- | 3719 | ||
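/*
 * Return contract sketch (editor's illustration), mirroring the caller in
 * vmw_execbuf_process() below:
 */
#if 0
void *cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
				command_size, &header);
if (IS_ERR(cmds))	/* too large, allocation failed, or -ERESTARTSYS */
	return PTR_ERR(cmds);
/* header != NULL: cmds lives in a command buffer; header == NULL: the
 * original kernel_commands value (possibly NULL) was passed through. */
#endif
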
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

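/*
 * Editorial note on the unwind ladder above: the labels release state in
 * the reverse order it was acquired -- binding_mutex first, then the ttm
 * reservation ticket (out_err), then reserved resources and relocations
 * (out_err_nores), then the sw_context lists under cmdbuf_mutex
 * (out_unlock), and finally any still-owned command buffer header
 * (out_free_header). cmdbuf_mutex is always taken before binding_mutex.
 */
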
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}


/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

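/*
 * Editorial sketch (not driver code): as the comment above describes, the
 * unpin entry point doubles as a query barrier. A hypothetical hardware
 * context destructor would call it before tearing the context down, so
 * that no unfinished queries can touch the pinned bo afterwards:
 */
#if 0
static void example_context_destroy(struct vmw_private *dev_priv,
				    struct vmw_resource *ctx)
{
	/* Flush and synchronize on all pending queries first ... */
	vmw_execbuf_release_pinned_bo(dev_priv);
	/* ... then it is safe to destroy the hardware context. */
	vmw_resource_unreference(&ctx);
}
#endif
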
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: we take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] - copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}
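
	/*
	 * Editorial note, a worked example of the versioned copy above:
	 * copy_offset[0] is the size of the v1 argument (everything up to,
	 * but excluding, @context_handle), so a v1 caller has exactly that
	 * many bytes copied and gets the invalid context id filled in. A
	 * v2 caller gets a second copy of copy_offset[1] - copy_offset[0]
	 * bytes, i.e. @context_handle plus the trailing pad, which must
	 * then be zero.
	 */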

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

//	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
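
/*
 * Editorial sketch (user-space side, not part of this file): on a stock
 * Linux build of this driver, a version 2 client fills the whole
 * struct drm_vmw_execbuf_arg, including the DX context handle, and
 * submits it through libdrm's drmCommandWrite(). The function name and
 * the handle values here are illustrative only; this port's modified
 * ioctl entry point dispatches differently.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int example_submit_v2(int fd, uint64_t commands, uint32_t size,
			     uint32_t dx_context_handle)
{
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));	/* Keeps pad64 zero, as required. */
	arg.commands = commands;	/* User pointer to SVGA commands. */
	arg.command_size = size;
	arg.version = 2;		/* Within DRM_VMW_EXECBUF_VERSION. */
	arg.context_handle = dx_context_handle;

	return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
}
#endif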