/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12
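
/*
 * Informal gloss, assuming the drm_open_hash API: VMW_RES_HT_ORDER is the
 * order handed to drm_ht_create() for the software context's res_ht table
 * (created elsewhere in the driver), i.e. 1 << 12 == 4096 hash buckets
 * used for the resource and buffer-object lookups below.
 */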

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};
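
/*
 * Sketch of how these records are consumed (drawn from the functions later
 * in this file, not new driver code): @offset indexes 32-bit words, so
 * vmw_resource_relocations_apply() simply patches
 *
 *	cb[rel->offset] = rel->res->id;
 *
 * where the caller computed the offset as a uint32_t pointer difference,
 * e.g. id_loc - sw_context->buf_start.
 */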

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        bool first_usage;
        bool no_buffer_needed;
};
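
/*
 * Lifecycle sketch (an informal summary of the functions below):
 * vmw_resource_val_add() allocates a node and hashes it the first time a
 * resource shows up in the command stream (first_usage == true);
 * vmw_resource_list_unreserve() later hands new_backup/new_backup_offset
 * to vmw_resource_unreserve() and frees staged_bindings once the batch is
 * submitted or backed off.
 */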

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}
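
/*
 * Usage sketch: the macro is meant for designated initializers in a command
 * dispatch table indexed by SVGA3D command id. The entry shown here is
 * hypothetical; the real table lives further down in this file.
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check,
 *			    true, false, false),
 *		...
 *	};
 */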

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
                                        bool backoff)
{
        struct vmw_resource_val_node *val;

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *new_backup =
                        backoff ? NULL : val->new_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_context_binding_state_transfer
                                        (val->res, val->staged_bindings);
                        }
                        kfree(val->staged_bindings);
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}


/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        list_add_tail(&node->head, &sw_context->resource_list);
        node->res = vmw_resource_reference(res);
        node->first_usage = true;

        if (unlikely(p_node != NULL))
                *p_node = node;

        return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_binding *entry;
        int ret = 0;
        struct vmw_resource *res;

        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                res = vmw_resource_reference_unless_doomed(entry->bi.res);
                if (unlikely(res == NULL))
                        continue;

                ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        list_for_each_entry(rel, list, head) {
                if (likely(rel->res != NULL))
                        cb[rel->offset] = rel->res->id;
                else
                        cb[rel->offset] = SVGA_3D_CMD_NOP;
        }
}
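
/*
 * Note (an inference from the code above): a NULL rel->res marks a
 * relocation whose resource went away, so instead of patching in an id the
 * slot is overwritten with SVGA_3D_CMD_NOP, turning the command into a
 * no-op for the device.
 */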

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return 0; //capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) bo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct ttm_buffer_object *bo = &res->backup->base;

                        ret = vmw_bo_to_validate_list
                                (sw_context, bo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }
        return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 enum vmw_res_type res_type,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          id_loc - sw_context->buf_start);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                goto out_err;

        if (res_type == vmw_res_context && dev_priv->has_mob &&
            node->first_usage) {

                /*
                 * Put contexts first on the list to be able to exit
                 * list traversal for contexts early.
                 */
                list_del(&node->head);
                list_add(&node->head, &sw_context->resource_list);

                ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
                if (unlikely(ret != 0))
                        goto out_err;
                node->staged_bindings =
                        kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
                if (node->staged_bindings == NULL) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = -ENOMEM;
                        goto out_err;
                }
                INIT_LIST_HEAD(&node->staged_bindings->list);
        }

        if (p_val)
                *p_val = node;

out_err:
        return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         id_loc - sw_context->buf_start);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                // dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_context_rebind_all(val->res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo bi;

                bi.ctx = ctx_node->res;
                bi.res = res_node ? res_node->res : NULL;
                bi.bt = vmw_ctx_binding_rt;
                bi.i1.rt_type = cmd->body.type;
                return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;

        }

        return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin(dev_priv->pinned_bo, false);
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        vmw_bo_pin(dev_priv->dummy_query_bo, true);
                        dev_priv->dummy_query_bo_pinned = true;

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                ttm_bo_reference(sw_context->cur_query_bo);
                }
        }
}
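
/*
 * Expected calling sequence, sketched from the comments above (an
 * assumption, not a verbatim caller):
 *
 *	vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	... validate buffers and submit the command batch ...
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	... emit a fence; it covers both the old and the new query bo ...
 */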
740 | 863 | ||
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

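/*
 * A minimal sketch of what the deferred translation amounts to, assuming
 * the relocation fields set up by the two helpers above. The real work is
 * done later by vmw_apply_relocations(), once the buffers have been
 * validated and placed; this example function is hypothetical and kept
 * compiled out, for illustration only.
 */
#if 0
static void example_apply_one_relocation(struct vmw_relocation *reloc,
					 struct ttm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case VMW_PL_GMR:
		/* Guest pointer: patch in the GMR id the bo ended up in. */
		reloc->location->gmrId = bo->mem.start;
		break;
	case VMW_PL_MOB:
		/* MOB handle: patch the mob id directly into the stream. */
		*reloc->mob_loc = bo->mem.start;
		break;
	default:
		break;
	}
}
#endif
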
/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

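/*
 * Note on the pattern above: on guest-backed (MOB) hardware the legacy
 * command is rewritten in place into its guest-backed equivalent and
 * re-dispatched. That is only safe because the two command layouts have
 * the same size, which is exactly what the BUG_ON() asserts. The end-
 * and wait-query validators below reuse the same trick.
 */
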
/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

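/*
 * Unlike the end-query paths above, the wait paths only look up and
 * validate the result buffer; there is no vmw_query_bo_switch_prepare()
 * call, since waiting on a query does not emit new results and so does
 * not change which buffer the device writes query results into.
 */
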
/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("Could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

//	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

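/*
 * Worked example of the clamp above (hypothetical numbers): for a
 * four-page buffer, bo_size = 4 * 4096 = 16384 bytes. With a guest
 * pointer offset of 4096 the remaining DMA window is 16384 - 4096 =
 * 12288 bytes, so a user-supplied suffix->maximumOffset of, say, 20000
 * is clamped down to 12288, keeping the device from reading or writing
 * past the end of the buffer object.
 */
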
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

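/*
 * Rough layout of the draw command validated above (sketch; counts are
 * taken from the command body):
 *
 *	SVGA3dCmdHeader		header;
 *	SVGA3dCmdDrawPrimitives	body;
 *	SVGA3dVertexDecl	decl[body.numVertexDecls];
 *	SVGA3dPrimitiveRange	range[body.numRanges];
 *
 * which is why maxnum is computed from header->size twice: once for the
 * vertex declarations and once for what remains after them.
 */
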
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

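/*
 * The guest-backed bind validators below are thin wrappers around
 * vmw_cmd_switch_backup(): binding a surface always uses offset 0 into
 * the MOB, while the (currently compiled-out) shader bind path further
 * down passes the command-supplied offsetInBytes instead.
 */
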
/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}
- | 1625 | ||
1480 | 1626 | #if 0 |
|
1481 | /** |
1627 | /** |
1482 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
1628 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
1483 | * command |
1629 | * command |
1484 | * |
1630 | * |
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo bi;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_compat_shader_lookup
			(vmw_context_res_man(ctx_node->res),
			 cmd->body.shid,
			 cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    vmw_res_shader,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	bi.ctx = ctx_node->res;
	bi.res = res_node ? res_node->res : NULL;
	bi.bt = vmw_ctx_binding_shader;
	bi.i1.shader_type = cmd->body.type;
	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
#endif
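
/*
 * Note on the lookup order above: when guest-backed objects are
 * available, the shader id is first resolved through the context's
 * compat shader manager so that legacy user-space shader ids can be
 * redirected to guest-backed shaders. Only if that lookup fails does
 * the code fall back to the generic user-space shader converter, and
 * the resulting binding is staged on the context rather than applied
 * immediately.
 */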
|
/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}
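
/*
 * Note: on guest-backed hardware the command id in the stream is
 * rewritten in place to SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, so the
 * device sees the guest-backed variant while user-space can keep
 * emitting the legacy SET_SHADER_CONST opcode.
 */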

#if 0
/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}
#endif

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
			  cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

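/*
 * The 2D (non-3D) SVGA commands above have fixed sizes known from the
 * opcode alone, so *size is set from the matching FIFO command struct
 * rather than from a command header. They are also restricted to
 * kernel-internal submissions; user-space streams containing them are
 * rejected with -EPERM before any further checking.
 */
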
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
//		    true, false, false),
//	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
//		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
//	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
//		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true)
};

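/*
 * The three booleans passed to VMW_CMD_DEF above correspond to the
 * user_allow, gb_disable and gb_enable fields tested by vmw_cmd_check()
 * below: whether the command may appear in user-space streams, whether
 * it is disallowed once guest-backed objects are active, and whether it
 * requires guest-backed support. Opcodes without an explicit entry are
 * presumably left zero-initialized and caught by the !entry->func check.
 */
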
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);


	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

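/*
 * Note that *size is rewritten from the command header before any of
 * the per-command checks run; on success the caller can therefore
 * advance its parse cursor by exactly one command, header included.
 */
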
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

1853 | 2056 | ||
1854 | static void vmw_free_relocations(struct vmw_sw_context *sw_context) |
2057 | static void vmw_free_relocations(struct vmw_sw_context *sw_context) |
1855 | { |
2058 | { |
1856 | sw_context->cur_reloc = 0; |
2059 | sw_context->cur_reloc = 0; |
1857 | } |
2060 | } |
1858 | 2061 | ||
1859 | static void vmw_apply_relocations(struct vmw_sw_context *sw_context) |
2062 | static void vmw_apply_relocations(struct vmw_sw_context *sw_context) |
1860 | { |
2063 | { |
1861 | uint32_t i; |
2064 | uint32_t i; |
1862 | struct vmw_relocation *reloc; |
2065 | struct vmw_relocation *reloc; |
1863 | struct ttm_validate_buffer *validate; |
2066 | struct ttm_validate_buffer *validate; |
1864 | struct ttm_buffer_object *bo; |
2067 | struct ttm_buffer_object *bo; |
1865 | 2068 | ||
1866 | for (i = 0; i < sw_context->cur_reloc; ++i) { |
2069 | for (i = 0; i < sw_context->cur_reloc; ++i) { |
1867 | reloc = &sw_context->relocs[i]; |
2070 | reloc = &sw_context->relocs[i]; |
1868 | validate = &sw_context->val_bufs[reloc->index].base; |
2071 | validate = &sw_context->val_bufs[reloc->index].base; |
1869 | bo = validate->bo; |
2072 | bo = validate->bo; |
1870 | switch (bo->mem.mem_type) { |
2073 | switch (bo->mem.mem_type) { |
1871 | case TTM_PL_VRAM: |
2074 | case TTM_PL_VRAM: |
1872 | reloc->location->offset += bo->offset; |
2075 | reloc->location->offset += bo->offset; |
1873 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; |
2076 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; |
1874 | break; |
2077 | break; |
1875 | case VMW_PL_GMR: |
2078 | case VMW_PL_GMR: |
1876 | reloc->location->gmrId = bo->mem.start; |
2079 | reloc->location->gmrId = bo->mem.start; |
1877 | break; |
2080 | break; |
1878 | case VMW_PL_MOB: |
2081 | case VMW_PL_MOB: |
1879 | *reloc->mob_loc = bo->mem.start; |
2082 | *reloc->mob_loc = bo->mem.start; |
1880 | break; |
2083 | break; |
1881 | default: |
2084 | default: |
1882 | BUG(); |
2085 | BUG(); |
1883 | } |
2086 | } |
1884 | } |
2087 | } |
1885 | vmw_free_relocations(sw_context); |
2088 | vmw_free_relocations(sw_context); |
1886 | } |
2089 | } |
1887 | 2090 | ||
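/*
 * How a relocation is patched depends on where the buffer ended up
 * after validation: VRAM placements are expressed as an offset into
 * the SVGA_GMR_FRAMEBUFFER pseudo-GMR, GMR placements use the GMR id
 * directly, and MOB placements write the MOB id through a separate
 * mob_loc pointer. Any other placement is a driver bug, hence BUG().
 */
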
/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		if (unlikely(val->staged_bindings))
			kfree(val->staged_bindings);
		kfree(val);
	}
}

1910 | 2113 | ||
1911 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) |
2114 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) |
1912 | { |
2115 | { |
1913 | struct vmw_validate_buffer *entry, *next; |
2116 | struct vmw_validate_buffer *entry, *next; |
1914 | struct vmw_resource_val_node *val; |
2117 | struct vmw_resource_val_node *val; |
1915 | 2118 | ||
1916 | /* |
2119 | /* |
1917 | * Drop references to DMA buffers held during command submission. |
2120 | * Drop references to DMA buffers held during command submission. |
1918 | */ |
2121 | */ |
1919 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, |
2122 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, |
1920 | base.head) { |
2123 | base.head) { |
1921 | list_del(&entry->base.head); |
2124 | list_del(&entry->base.head); |
1922 | ttm_bo_unref(&entry->base.bo); |
2125 | ttm_bo_unref(&entry->base.bo); |
1923 | (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); |
2126 | (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); |
1924 | sw_context->cur_val_buf--; |
2127 | sw_context->cur_val_buf--; |
1925 | } |
2128 | } |
1926 | BUG_ON(sw_context->cur_val_buf != 0); |
2129 | BUG_ON(sw_context->cur_val_buf != 0); |
1927 | 2130 | ||
1928 | list_for_each_entry(val, &sw_context->resource_list, head) |
2131 | list_for_each_entry(val, &sw_context->resource_list, head) |
1929 | (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); |
2132 | (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); |
1930 | } |
2133 | } |
1931 | 2134 | ||
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool validate_as_mob)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */
	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

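/*
 * The bounce buffer grows geometrically (by roughly 1.5x, page
 * aligned) and is never shrunk while the software context lives. Note
 * that growing frees the old buffer before allocating the new one, so
 * a resize does not preserve previous contents; callers always copy in
 * the full command stream afterwards.
 */
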
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and hand back a NULL
 * fence pointer. It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created; otherwise no handle is created.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

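/*
 * Note that the function above deliberately returns 0 even when fence
 * creation failed: by that point the fifo has either been fenced or
 * fully synced, and a NULL *p_fence is a valid "already signaled"
 * marker for the buffer-fencing code that follows.
 */
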
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is left untouched,
 * and if user-space preloaded it with -EFAULT, the error can be detected
 * there. Also, if copying fails, user-space will be unable to signal the
 * fence object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
//	ret = copy_to_user(user_fence_rep, &fence_rep,
//			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

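/*
 * With the copy_to_user above commented out in this port, ret still
 * holds the fence creation status when the recovery branch is
 * evaluated; since fence_rep.error mirrors that same status, the
 * sync-and-unref path appears unreachable here and only becomes live
 * again once the copy is re-enabled.
 */
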
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

/*
	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else */
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

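	/*
	 * Everything up to this point only stages state: commands are
	 * verified, resources reserved and buffers validated before any
	 * fifo space is claimed, so an early failure unwinds through
	 * the out_err/out_err_nores labels without the device having
	 * seen a single command from this batch.
	 */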
2229 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
2441 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
2230 | if (unlikely(cmd == NULL)) { |
2442 | if (unlikely(cmd == NULL)) { |
2231 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
2443 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
2232 | ret = -ENOMEM; |
2444 | ret = -ENOMEM; |
2233 | goto out_unlock_binding; |
2445 | goto out_unlock_binding; |
2234 | } |
2446 | } |
2235 | 2447 | ||
2236 | vmw_apply_relocations(sw_context); |
2448 | vmw_apply_relocations(sw_context); |
2237 | memcpy(cmd, kernel_commands, command_size); |
2449 | memcpy(cmd, kernel_commands, command_size); |
2238 | 2450 | ||
2239 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); |
2451 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); |
2240 | vmw_resource_relocations_free(&sw_context->res_relocations); |
2452 | vmw_resource_relocations_free(&sw_context->res_relocations); |
2241 | 2453 | ||
2242 | vmw_fifo_commit(dev_priv, command_size); |
2454 | vmw_fifo_commit(dev_priv, command_size); |
2243 | 2455 | ||
2244 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
2456 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
2245 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
2457 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
2246 | &fence, |
2458 | &fence, |
2247 | (user_fence_rep) ? &handle : NULL); |
2459 | (user_fence_rep) ? &handle : NULL); |
2248 | /* |
2460 | /* |
2249 | * This error is harmless, because if fence submission fails, |
2461 | * This error is harmless, because if fence submission fails, |
2250 | * vmw_fifo_send_fence will sync. The error will be propagated to |
2462 | * vmw_fifo_send_fence will sync. The error will be propagated to |
2251 | * user-space in @fence_rep |
2463 | * user-space in @fence_rep |
2252 | */ |
2464 | */ |
2253 | 2465 | ||
2254 | if (ret != 0) |
2466 | if (ret != 0) |
2255 | DRM_ERROR("Fence submission error. Syncing.\n"); |
2467 | DRM_ERROR("Fence submission error. Syncing.\n"); |
2256 | 2468 | ||
	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	mutex_unlock(&dev_priv->binding_mutex);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

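	/*
	 * Error paths. Each label undoes the most recently set up state and
	 * falls through to the next, ending with reverting any staged
	 * command-buffer resources and dropping the cmdbuf_mutex.
	 */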
out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}


/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex must be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

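	/*
	 * Both the pinned query bo and the dummy query bo go on a local
	 * validation list so they can be reserved and fenced together.
	 */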
	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

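	/* Reserve both buffers, retrying if interrupted by a signal. */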
	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

2390 | 2605 | ||
2391 | if (dev_priv->query_cid_valid) { |
2606 | if (dev_priv->query_cid_valid) { |
2392 | BUG_ON(fence != NULL); |
2607 | BUG_ON(fence != NULL); |
2393 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); |
2608 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); |
2394 | if (unlikely(ret != 0)) { |
2609 | if (unlikely(ret != 0)) { |
2395 | vmw_execbuf_unpin_panic(dev_priv); |
2610 | vmw_execbuf_unpin_panic(dev_priv); |
2396 | goto out_no_emit; |
2611 | goto out_no_emit; |
2397 | } |
2612 | } |
2398 | dev_priv->query_cid_valid = false; |
2613 | dev_priv->query_cid_valid = false; |
2399 | } |
2614 | } |
2400 | 2615 | ||
2401 | vmw_bo_pin(dev_priv->pinned_bo, false); |
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}

2644 | /** |
2430 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned |
2645 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned |
2431 | * query bo. |
2646 | * query bo. |
2432 | * |
2647 | * |
2433 | * @dev_priv: The device private structure. |
2648 | * @dev_priv: The device private structure. |
2434 | * |
2649 | * |
2435 | * This function should be used to unpin the pinned query bo, or |
2650 | * This function should be used to unpin the pinned query bo, or |
2436 | * as a query barrier when we need to make sure that all queries have |
2651 | * as a query barrier when we need to make sure that all queries have |
2437 | * finished before the next fifo command. (For example on hardware |
2652 | * finished before the next fifo command. (For example on hardware |
2438 | * context destructions where the hardware may otherwise leak unfinished |
2653 | * context destructions where the hardware may otherwise leak unfinished |
2439 | * queries). |
2654 | * queries). |
2440 | * |
2655 | * |
2441 | * This function does not return any failure codes, but make attempts |
2656 | * This function does not return any failure codes, but make attempts |
2442 | * to do safe unpinning in case of errors. |
2657 | * to do safe unpinning in case of errors. |
2443 | * |
2658 | * |
2444 | * The function will synchronize on the previous query barrier, and will |
2659 | * The function will synchronize on the previous query barrier, and will |
2445 | * thus not finish until that barrier has executed. |
2660 | * thus not finish until that barrier has executed. |
2446 | */ |
2661 | */ |
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}


int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

//	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
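
/*
 * Illustrative user-space sketch (not part of this driver): submitting a
 * command buffer through the execbuf ioctl via libdrm's drmCommandWrite().
 * The exact layout of struct drm_vmw_execbuf_arg is defined in
 * vmwgfx_drm.h; cmd_buf, cmd_size and fence_rep below are placeholders.
 *
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.commands = (unsigned long) cmd_buf;     // SVGA3D command stream
 *	arg.command_size = cmd_size;                // bytes actually used
 *	arg.throttle_us = 0;                        // no lag throttling
 *	arg.fence_rep = (unsigned long) &fence_rep; // optional fence info
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */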