Rev 5078 | Rev 6296 |
---|---|
1 | /************************************************************************** |
1 | /************************************************************************** |
2 | * |
2 | * |
3 | * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA |
3 | * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. |
4 | * All Rights Reserved. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: |
12 | * the following conditions: |
13 | * |
13 | * |
14 | * The above copyright notice and this permission notice (including the |
14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions |
15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. |
16 | * of the Software. |
17 | * |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * |
25 | * |
26 | **************************************************************************/ |
26 | **************************************************************************/ |
27 | 27 | ||
28 | #include "vmwgfx_drv.h" |
28 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_resource_priv.h" |
29 | #include "vmwgfx_resource_priv.h" |
- | 30 | #include "vmwgfx_binding.h" |
|
30 | #include "ttm/ttm_placement.h" |
31 | #include "ttm/ttm_placement.h" |
31 | 32 | ||
32 | struct vmw_user_context { |
33 | struct vmw_user_context { |
33 | struct ttm_base_object base; |
34 | struct ttm_base_object base; |
34 | struct vmw_resource res; |
35 | struct vmw_resource res; |
35 | struct vmw_ctx_binding_state cbs; |
36 | struct vmw_ctx_binding_state *cbs; |
36 | struct vmw_cmdbuf_res_manager *man; |
37 | struct vmw_cmdbuf_res_manager *man; |
- | 38 | struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX]; |
|
- | 39 | spinlock_t cotable_lock; |
|
- | 40 | struct vmw_dma_buffer *dx_query_mob; |
|
37 | }; |
41 | }; |
38 | - | ||
39 | - | ||
40 | - | ||
41 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); |
- | |
42 | 42 | ||
43 | static void vmw_user_context_free(struct vmw_resource *res); |
43 | static void vmw_user_context_free(struct vmw_resource *res); |
44 | static struct vmw_resource * |
44 | static struct vmw_resource * |
45 | vmw_user_context_base_to_res(struct ttm_base_object *base); |
45 | vmw_user_context_base_to_res(struct ttm_base_object *base); |
46 | 46 | ||
47 | static int vmw_gb_context_create(struct vmw_resource *res); |
47 | static int vmw_gb_context_create(struct vmw_resource *res); |
48 | static int vmw_gb_context_bind(struct vmw_resource *res, |
48 | static int vmw_gb_context_bind(struct vmw_resource *res, |
49 | struct ttm_validate_buffer *val_buf); |
49 | struct ttm_validate_buffer *val_buf); |
50 | static int vmw_gb_context_unbind(struct vmw_resource *res, |
50 | static int vmw_gb_context_unbind(struct vmw_resource *res, |
51 | bool readback, |
51 | bool readback, |
52 | struct ttm_validate_buffer *val_buf); |
52 | struct ttm_validate_buffer *val_buf); |
53 | static int vmw_gb_context_destroy(struct vmw_resource *res); |
53 | static int vmw_gb_context_destroy(struct vmw_resource *res); |
54 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); |
54 | static int vmw_dx_context_create(struct vmw_resource *res); |
55 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, |
55 | static int vmw_dx_context_bind(struct vmw_resource *res, |
56 | bool rebind); |
56 | struct ttm_validate_buffer *val_buf); |
57 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); |
57 | static int vmw_dx_context_unbind(struct vmw_resource *res, |
- | 58 | bool readback, |
|
58 | static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); |
59 | struct ttm_validate_buffer *val_buf); |
59 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); |
60 | static int vmw_dx_context_destroy(struct vmw_resource *res); |
- | 61 | ||
60 | static uint64_t vmw_user_context_size; |
62 | static uint64_t vmw_user_context_size; |
61 | 63 | ||
62 | static const struct vmw_user_resource_conv user_context_conv = { |
64 | static const struct vmw_user_resource_conv user_context_conv = { |
63 | .object_type = VMW_RES_CONTEXT, |
65 | .object_type = VMW_RES_CONTEXT, |
64 | .base_obj_to_res = vmw_user_context_base_to_res, |
66 | .base_obj_to_res = vmw_user_context_base_to_res, |
65 | .res_free = vmw_user_context_free |
67 | .res_free = vmw_user_context_free |
66 | }; |
68 | }; |
67 | 69 | ||
68 | const struct vmw_user_resource_conv *user_context_converter = |
70 | const struct vmw_user_resource_conv *user_context_converter = |
69 | &user_context_conv; |
71 | &user_context_conv; |
70 | 72 | ||
71 | 73 | ||
72 | static const struct vmw_res_func vmw_legacy_context_func = { |
74 | static const struct vmw_res_func vmw_legacy_context_func = { |
73 | .res_type = vmw_res_context, |
75 | .res_type = vmw_res_context, |
74 | .needs_backup = false, |
76 | .needs_backup = false, |
75 | .may_evict = false, |
77 | .may_evict = false, |
76 | .type_name = "legacy contexts", |
78 | .type_name = "legacy contexts", |
77 | .backup_placement = NULL, |
79 | .backup_placement = NULL, |
78 | .create = NULL, |
80 | .create = NULL, |
79 | .destroy = NULL, |
81 | .destroy = NULL, |
80 | .bind = NULL, |
82 | .bind = NULL, |
81 | .unbind = NULL |
83 | .unbind = NULL |
82 | }; |
84 | }; |
83 | 85 | ||
84 | static const struct vmw_res_func vmw_gb_context_func = { |
86 | static const struct vmw_res_func vmw_gb_context_func = { |
85 | .res_type = vmw_res_context, |
87 | .res_type = vmw_res_context, |
86 | .needs_backup = true, |
88 | .needs_backup = true, |
87 | .may_evict = true, |
89 | .may_evict = true, |
88 | .type_name = "guest backed contexts", |
90 | .type_name = "guest backed contexts", |
89 | .backup_placement = &vmw_mob_placement, |
91 | .backup_placement = &vmw_mob_placement, |
90 | .create = vmw_gb_context_create, |
92 | .create = vmw_gb_context_create, |
91 | .destroy = vmw_gb_context_destroy, |
93 | .destroy = vmw_gb_context_destroy, |
92 | .bind = vmw_gb_context_bind, |
94 | .bind = vmw_gb_context_bind, |
93 | .unbind = vmw_gb_context_unbind |
95 | .unbind = vmw_gb_context_unbind |
94 | }; |
96 | }; |
95 | 97 | ||
- | 98 | static const struct vmw_res_func vmw_dx_context_func = { |
|
- | 99 | .res_type = vmw_res_dx_context, |
|
- | 100 | .needs_backup = true, |
|
- | 101 | .may_evict = true, |
|
- | 102 | .type_name = "dx contexts", |
|
96 | static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { |
103 | .backup_placement = &vmw_mob_placement, |
97 | [vmw_ctx_binding_shader] = vmw_context_scrub_shader, |
104 | .create = vmw_dx_context_create, |
- | 105 | .destroy = vmw_dx_context_destroy, |
|
98 | [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, |
106 | .bind = vmw_dx_context_bind, |
- | 107 | .unbind = vmw_dx_context_unbind |
|
99 | [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; |
108 | }; |
100 | 109 | ||
101 | /** |
110 | /** |
102 | * Context management: |
111 | * Context management: |
103 | */ |
112 | */ |
- | 113 | ||
- | 114 | static void vmw_context_cotables_unref(struct vmw_user_context *uctx) |
|
- | 115 | { |
|
- | 116 | struct vmw_resource *res; |
|
- | 117 | int i; |
|
- | 118 | ||
- | 119 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { |
|
- | 120 | spin_lock(&uctx->cotable_lock); |
|
- | 121 | res = uctx->cotables[i]; |
|
- | 122 | uctx->cotables[i] = NULL; |
|
- | 123 | spin_unlock(&uctx->cotable_lock); |
|
- | 124 | ||
- | 125 | if (res) |
|
- | 126 | vmw_resource_unreference(&res); |
|
- | 127 | } |
|
- | 128 | } |
|
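The vmw_context_cotables_unref() function added above holds the spinlock only long enough to detach each cotable pointer, then drops the reference outside the lock, so the potentially heavyweight release never runs with the lock held. A user-space sketch of that detach-then-release idiom, using a pthread mutex in place of the kernel spinlock (obj, slot_lock and obj_put are invented names):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; };

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *slot;

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("freeing object\n");
		free(o);
	}
}

static void slot_unref(void)
{
	struct obj *o;

	pthread_mutex_lock(&slot_lock);
	o = slot;		/* steal the pointer under the lock... */
	slot = NULL;
	pthread_mutex_unlock(&slot_lock);

	if (o)
		obj_put(o);	/* ...release the reference outside it */
}

int main(void)
{
	slot = calloc(1, sizeof(*slot));
	slot->refcount = 1;
	slot_unref();
	return 0;
}
```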
104 | 129 | ||
105 | static void vmw_hw_context_destroy(struct vmw_resource *res) |
130 | static void vmw_hw_context_destroy(struct vmw_resource *res) |
106 | { |
131 | { |
107 | struct vmw_user_context *uctx = |
132 | struct vmw_user_context *uctx = |
108 | container_of(res, struct vmw_user_context, res); |
133 | container_of(res, struct vmw_user_context, res); |
109 | struct vmw_private *dev_priv = res->dev_priv; |
134 | struct vmw_private *dev_priv = res->dev_priv; |
110 | struct { |
135 | struct { |
111 | SVGA3dCmdHeader header; |
136 | SVGA3dCmdHeader header; |
112 | SVGA3dCmdDestroyContext body; |
137 | SVGA3dCmdDestroyContext body; |
113 | } *cmd; |
138 | } *cmd; |
114 | 139 | ||
115 | 140 | ||
- | 141 | if (res->func->destroy == vmw_gb_context_destroy || |
|
116 | if (res->func->destroy == vmw_gb_context_destroy) { |
142 | res->func->destroy == vmw_dx_context_destroy) { |
117 | mutex_lock(&dev_priv->cmdbuf_mutex); |
143 | mutex_lock(&dev_priv->cmdbuf_mutex); |
118 | vmw_cmdbuf_res_man_destroy(uctx->man); |
144 | vmw_cmdbuf_res_man_destroy(uctx->man); |
119 | mutex_lock(&dev_priv->binding_mutex); |
145 | mutex_lock(&dev_priv->binding_mutex); |
120 | (void) vmw_context_binding_state_kill(&uctx->cbs); |
146 | vmw_binding_state_kill(uctx->cbs); |
121 | (void) vmw_gb_context_destroy(res); |
147 | (void) res->func->destroy(res); |
122 | mutex_unlock(&dev_priv->binding_mutex); |
148 | mutex_unlock(&dev_priv->binding_mutex); |
123 | if (dev_priv->pinned_bo != NULL && |
149 | if (dev_priv->pinned_bo != NULL && |
124 | !dev_priv->query_cid_valid) |
150 | !dev_priv->query_cid_valid) |
125 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
151 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
126 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
152 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
- | 153 | vmw_context_cotables_unref(uctx); |
|
127 | return; |
154 | return; |
128 | } |
155 | } |
129 | 156 | ||
130 | vmw_execbuf_release_pinned_bo(dev_priv); |
157 | vmw_execbuf_release_pinned_bo(dev_priv); |
131 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
158 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
132 | if (unlikely(cmd == NULL)) { |
159 | if (unlikely(cmd == NULL)) { |
133 | DRM_ERROR("Failed reserving FIFO space for surface " |
160 | DRM_ERROR("Failed reserving FIFO space for surface " |
134 | "destruction.\n"); |
161 | "destruction.\n"); |
135 | return; |
162 | return; |
136 | } |
163 | } |
137 | 164 | ||
138 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); |
165 | cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY; |
139 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); |
166 | cmd->header.size = sizeof(cmd->body); |
140 | cmd->body.cid = cpu_to_le32(res->id); |
167 | cmd->body.cid = res->id; |
141 | 168 | ||
142 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
169 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
143 | vmw_3d_resource_dec(dev_priv, false); |
170 | vmw_fifo_resource_dec(dev_priv); |
144 | } |
171 | } |
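Both revisions emit device commands the same way throughout this file: reserve room for a header+body struct in the FIFO, fill it in place, then commit it. A self-contained sketch of that reserve/fill/commit pattern — ring_reserve/ring_commit and DEMO_CMD_DESTROY_CONTEXT stand in for vmw_fifo_reserve/vmw_fifo_commit and the real SVGA command ids:

```c
#include <stdint.h>
#include <stdio.h>

#define DEMO_CMD_DESTROY_CONTEXT 42u

struct cmd_header { uint32_t id; uint32_t size; };
struct cmd_destroy_body { uint32_t cid; };

static uint8_t ring[256];
static size_t ring_head;

static void *ring_reserve(size_t bytes)
{
	if (ring_head + bytes > sizeof(ring))
		return NULL;		/* caller must handle failure */
	return ring + ring_head;
}

static void ring_commit(size_t bytes) { ring_head += bytes; }

int main(void)
{
	struct {
		struct cmd_header header;
		struct cmd_destroy_body body;
	} *cmd = ring_reserve(sizeof(*cmd));

	if (!cmd)
		return 1;
	cmd->header.id = DEMO_CMD_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);	/* body only, not header */
	cmd->body.cid = 3;
	ring_commit(sizeof(*cmd));

	printf("committed %zu bytes\n", ring_head);
	return 0;
}
```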
145 | 172 | ||
146 | static int vmw_gb_context_init(struct vmw_private *dev_priv, |
173 | static int vmw_gb_context_init(struct vmw_private *dev_priv, |
- | 174 | bool dx, |
|
147 | struct vmw_resource *res, |
175 | struct vmw_resource *res, |
148 | void (*res_free) (struct vmw_resource *res)) |
176 | void (*res_free)(struct vmw_resource *res)) |
149 | { |
177 | { |
150 | int ret; |
178 | int ret, i; |
151 | struct vmw_user_context *uctx = |
179 | struct vmw_user_context *uctx = |
152 | container_of(res, struct vmw_user_context, res); |
180 | container_of(res, struct vmw_user_context, res); |
153 | |
181 | |
- |
182 | res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : |
- |
183 | SVGA3D_CONTEXT_DATA_SIZE); |
154 | ret = vmw_resource_init(dev_priv, res, true, |
184 | ret = vmw_resource_init(dev_priv, res, true, |
155 | res_free, &vmw_gb_context_func); |
185 | res_free, |
- |
186 | dx ? &vmw_dx_context_func : |
- |
187 | &vmw_gb_context_func); |
156 | res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; |
- |
157 | if (unlikely(ret != 0)) |
188 | if (unlikely(ret != 0)) |
158 | goto out_err; |
189 | goto out_err; |
159 | 190 | ||
160 | if (dev_priv->has_mob) { |
191 | if (dev_priv->has_mob) { |
161 | uctx->man = vmw_cmdbuf_res_man_create(dev_priv); |
192 | uctx->man = vmw_cmdbuf_res_man_create(dev_priv); |
162 | if (unlikely(IS_ERR(uctx->man))) { |
193 | if (IS_ERR(uctx->man)) { |
163 | ret = PTR_ERR(uctx->man); |
194 | ret = PTR_ERR(uctx->man); |
164 | uctx->man = NULL; |
195 | uctx->man = NULL; |
165 | goto out_err; |
196 | goto out_err; |
166 | } |
197 | } |
167 | } |
198 | } |
168 | |
199 | |
- |
200 | uctx->cbs = vmw_binding_state_alloc(dev_priv); |
- |
201 | if (IS_ERR(uctx->cbs)) { |
- |
202 | ret = PTR_ERR(uctx->cbs); |
- |
203 | goto out_err; |
- |
204 | } |
169 | memset(&uctx->cbs, 0, sizeof(uctx->cbs)); |
205 | |
- |
206 | spin_lock_init(&uctx->cotable_lock); |
- |
207 | |
- |
208 | if (dx) { |
- |
209 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { |
- |
210 | uctx->cotables[i] = vmw_cotable_alloc(dev_priv, |
- |
211 | &uctx->res, i); |
- |
212 | if (unlikely(uctx->cotables[i] == NULL)) { |
- |
213 | ret = -ENOMEM; |
- |
214 | goto out_cotables; |
- |
215 | } |
- |
216 | } |
- |
217 | } |
- |
218 | |
170 | INIT_LIST_HEAD(&uctx->cbs.list); |
219 | |
171 | 220 | ||
172 | vmw_resource_activate(res, vmw_hw_context_destroy); |
221 | vmw_resource_activate(res, vmw_hw_context_destroy); |
173 | return 0; |
222 | return 0; |
- |
223 | |
- |
224 | out_cotables: |
174 | |
225 | vmw_context_cotables_unref(uctx); |
175 | out_err: |
226 | out_err: |
176 | if (res_free) |
227 | if (res_free) |
177 | res_free(res); |
228 | res_free(res); |
178 | else |
229 | else |
179 | kfree(res); |
230 | kfree(res); |
180 | return ret; |
231 | return ret; |
181 | } |
232 | } |
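The new error path above checks vmw_binding_state_alloc() with IS_ERR()/PTR_ERR() rather than testing for NULL: the error code travels inside the pointer value itself. A stand-alone sketch of that error-pointer convention (re-implemented locally for illustration, not the kernel's headers):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* the top 4095 addresses are reserved for error codes */
	return (unsigned long)p >= (unsigned long)-4095;
}

static void *state_alloc(int fail)
{
	void *p;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* no NULL-vs-error ambiguity */
	p = malloc(64);
	return p ? p : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *cbs = state_alloc(1);

	if (IS_ERR(cbs)) {
		printf("alloc failed: %ld\n", PTR_ERR(cbs));
		return 1;
	}
	free(cbs);
	return 0;
}
```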
182 | 233 | ||
183 | static int vmw_context_init(struct vmw_private *dev_priv, |
234 | static int vmw_context_init(struct vmw_private *dev_priv, |
184 | struct vmw_resource *res, |
235 | struct vmw_resource *res, |
185 | void (*res_free) (struct vmw_resource *res)) |
236 | void (*res_free)(struct vmw_resource *res), |
- | 237 | bool dx) |
|
186 | { |
238 | { |
187 | int ret; |
239 | int ret; |
188 | 240 | ||
189 | struct { |
241 | struct { |
190 | SVGA3dCmdHeader header; |
242 | SVGA3dCmdHeader header; |
191 | SVGA3dCmdDefineContext body; |
243 | SVGA3dCmdDefineContext body; |
192 | } *cmd; |
244 | } *cmd; |
193 | 245 | ||
194 | if (dev_priv->has_mob) |
246 | if (dev_priv->has_mob) |
195 | return vmw_gb_context_init(dev_priv, res, res_free); |
247 | return vmw_gb_context_init(dev_priv, dx, res, res_free); |
196 | 248 | ||
197 | ret = vmw_resource_init(dev_priv, res, false, |
249 | ret = vmw_resource_init(dev_priv, res, false, |
198 | res_free, &vmw_legacy_context_func); |
250 | res_free, &vmw_legacy_context_func); |
199 | 251 | ||
200 | if (unlikely(ret != 0)) { |
252 | if (unlikely(ret != 0)) { |
201 | DRM_ERROR("Failed to allocate a resource id.\n"); |
253 | DRM_ERROR("Failed to allocate a resource id.\n"); |
202 | goto out_early; |
254 | goto out_early; |
203 | } |
255 | } |
204 | 256 | ||
205 | if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { |
257 | if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { |
206 | DRM_ERROR("Out of hw context ids.\n"); |
258 | DRM_ERROR("Out of hw context ids.\n"); |
207 | vmw_resource_unreference(&res); |
259 | vmw_resource_unreference(&res); |
208 | return -ENOMEM; |
260 | return -ENOMEM; |
209 | } |
261 | } |
210 | 262 | ||
211 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
263 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
212 | if (unlikely(cmd == NULL)) { |
264 | if (unlikely(cmd == NULL)) { |
213 | DRM_ERROR("Fifo reserve failed.\n"); |
265 | DRM_ERROR("Fifo reserve failed.\n"); |
214 | vmw_resource_unreference(&res); |
266 | vmw_resource_unreference(&res); |
215 | return -ENOMEM; |
267 | return -ENOMEM; |
216 | } |
268 | } |
217 | 269 | ||
218 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); |
270 | cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE; |
219 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); |
271 | cmd->header.size = sizeof(cmd->body); |
220 | cmd->body.cid = cpu_to_le32(res->id); |
272 | cmd->body.cid = res->id; |
221 | 273 | ||
222 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
274 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
223 | (void) vmw_3d_resource_inc(dev_priv, false); |
275 | vmw_fifo_resource_inc(dev_priv); |
224 | vmw_resource_activate(res, vmw_hw_context_destroy); |
276 | vmw_resource_activate(res, vmw_hw_context_destroy); |
225 | return 0; |
277 | return 0; |
226 | 278 | ||
227 | out_early: |
279 | out_early: |
228 | if (res_free == NULL) |
280 | if (res_free == NULL) |
229 | kfree(res); |
281 | kfree(res); |
230 | else |
282 | else |
231 | res_free(res); |
283 | res_free(res); |
232 | return ret; |
284 | return ret; |
233 | } |
285 | } |
234 | |
- |
235 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) |
- |
236 | { |
- |
237 | struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); |
- |
238 | int ret; |
- |
239 | |
- |
240 | if (unlikely(res == NULL)) |
- |
241 | return NULL; |
- |
242 | |
- |
243 | ret = vmw_context_init(dev_priv, res, NULL); |
- |
244 | |
- |
245 | return (ret == 0) ? res : NULL; |
- |
- |
286 | |
- |
287 | |
- |
288 | /* |
246 | } |
289 | * GB context. |
247 | |
290 | */ |
248 | 291 | ||
249 | static int vmw_gb_context_create(struct vmw_resource *res) |
292 | static int vmw_gb_context_create(struct vmw_resource *res) |
250 | { |
293 | { |
251 | struct vmw_private *dev_priv = res->dev_priv; |
294 | struct vmw_private *dev_priv = res->dev_priv; |
252 | int ret; |
295 | int ret; |
253 | struct { |
296 | struct { |
254 | SVGA3dCmdHeader header; |
297 | SVGA3dCmdHeader header; |
255 | SVGA3dCmdDefineGBContext body; |
298 | SVGA3dCmdDefineGBContext body; |
256 | } *cmd; |
299 | } *cmd; |
257 | 300 | ||
258 | if (likely(res->id != -1)) |
301 | if (likely(res->id != -1)) |
259 | return 0; |
302 | return 0; |
260 | 303 | ||
261 | ret = vmw_resource_alloc_id(res); |
304 | ret = vmw_resource_alloc_id(res); |
262 | if (unlikely(ret != 0)) { |
305 | if (unlikely(ret != 0)) { |
263 | DRM_ERROR("Failed to allocate a context id.\n"); |
306 | DRM_ERROR("Failed to allocate a context id.\n"); |
264 | goto out_no_id; |
307 | goto out_no_id; |
265 | } |
308 | } |
266 | 309 | ||
267 | if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { |
310 | if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { |
268 | ret = -EBUSY; |
311 | ret = -EBUSY; |
269 | goto out_no_fifo; |
312 | goto out_no_fifo; |
270 | } |
313 | } |
271 | 314 | ||
272 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
315 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
273 | if (unlikely(cmd == NULL)) { |
316 | if (unlikely(cmd == NULL)) { |
274 | DRM_ERROR("Failed reserving FIFO space for context " |
317 | DRM_ERROR("Failed reserving FIFO space for context " |
275 | "creation.\n"); |
318 | "creation.\n"); |
276 | ret = -ENOMEM; |
319 | ret = -ENOMEM; |
277 | goto out_no_fifo; |
320 | goto out_no_fifo; |
278 | } |
321 | } |
279 | 322 | ||
280 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; |
323 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; |
281 | cmd->header.size = sizeof(cmd->body); |
324 | cmd->header.size = sizeof(cmd->body); |
282 | cmd->body.cid = res->id; |
325 | cmd->body.cid = res->id; |
283 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
326 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
284 | (void) vmw_3d_resource_inc(dev_priv, false); |
327 | vmw_fifo_resource_inc(dev_priv); |
285 | 328 | ||
286 | return 0; |
329 | return 0; |
287 | 330 | ||
288 | out_no_fifo: |
331 | out_no_fifo: |
289 | vmw_resource_release_id(res); |
332 | vmw_resource_release_id(res); |
290 | out_no_id: |
333 | out_no_id: |
291 | return ret; |
334 | return ret; |
292 | } |
335 | } |
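vmw_gb_context_create() above unwinds failures with goto labels, releasing only what was already acquired, in reverse order. A compact sketch of the same idiom (alloc_id/release_id and DEMO_MAX_IDS are invented stand-ins):

```c
#include <stdio.h>

#define DEMO_MAX_IDS 4

static int next_id;

static int alloc_id(void)      { return next_id++; }
static void release_id(int id) { printf("released id %d\n", id); }

static int demo_create(void)
{
	int ret = 0;
	int id = alloc_id();

	if (id >= DEMO_MAX_IDS) {	/* hardware limit exceeded */
		ret = -1;
		goto out_no_fifo;	/* id was allocated: undo it */
	}
	printf("created with id %d\n", id);
	return 0;

out_no_fifo:
	release_id(id);
	return ret;
}

int main(void)
{
	next_id = DEMO_MAX_IDS;		/* force the failure path */
	return demo_create() ? 1 : 0;
}
```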
293 | 336 | ||
294 | static int vmw_gb_context_bind(struct vmw_resource *res, |
337 | static int vmw_gb_context_bind(struct vmw_resource *res, |
295 | struct ttm_validate_buffer *val_buf) |
338 | struct ttm_validate_buffer *val_buf) |
296 | { |
339 | { |
297 | struct vmw_private *dev_priv = res->dev_priv; |
340 | struct vmw_private *dev_priv = res->dev_priv; |
298 | struct { |
341 | struct { |
299 | SVGA3dCmdHeader header; |
342 | SVGA3dCmdHeader header; |
300 | SVGA3dCmdBindGBContext body; |
343 | SVGA3dCmdBindGBContext body; |
301 | } *cmd; |
344 | } *cmd; |
302 | struct ttm_buffer_object *bo = val_buf->bo; |
345 | struct ttm_buffer_object *bo = val_buf->bo; |
303 | 346 | ||
304 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
347 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
305 | 348 | ||
306 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
349 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
307 | if (unlikely(cmd == NULL)) { |
350 | if (unlikely(cmd == NULL)) { |
308 | DRM_ERROR("Failed reserving FIFO space for context " |
351 | DRM_ERROR("Failed reserving FIFO space for context " |
309 | "binding.\n"); |
352 | "binding.\n"); |
310 | return -ENOMEM; |
353 | return -ENOMEM; |
311 | } |
354 | } |
312 | - | ||
313 | cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
355 | cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
314 | cmd->header.size = sizeof(cmd->body); |
356 | cmd->header.size = sizeof(cmd->body); |
315 | cmd->body.cid = res->id; |
357 | cmd->body.cid = res->id; |
316 | cmd->body.mobid = bo->mem.start; |
358 | cmd->body.mobid = bo->mem.start; |
317 | cmd->body.validContents = res->backup_dirty; |
359 | cmd->body.validContents = res->backup_dirty; |
318 | res->backup_dirty = false; |
360 | res->backup_dirty = false; |
319 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
361 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
320 | 362 | ||
321 | return 0; |
363 | return 0; |
322 | } |
364 | } |
323 | 365 | ||
324 | static int vmw_gb_context_unbind(struct vmw_resource *res, |
366 | static int vmw_gb_context_unbind(struct vmw_resource *res, |
325 | bool readback, |
367 | bool readback, |
326 | struct ttm_validate_buffer *val_buf) |
368 | struct ttm_validate_buffer *val_buf) |
327 | { |
369 | { |
328 | struct vmw_private *dev_priv = res->dev_priv; |
370 | struct vmw_private *dev_priv = res->dev_priv; |
329 | struct ttm_buffer_object *bo = val_buf->bo; |
371 | struct ttm_buffer_object *bo = val_buf->bo; |
330 | struct vmw_fence_obj *fence; |
372 | struct vmw_fence_obj *fence; |
331 | struct vmw_user_context *uctx = |
373 | struct vmw_user_context *uctx = |
332 | container_of(res, struct vmw_user_context, res); |
374 | container_of(res, struct vmw_user_context, res); |
333 | 375 | ||
334 | struct { |
376 | struct { |
335 | SVGA3dCmdHeader header; |
377 | SVGA3dCmdHeader header; |
336 | SVGA3dCmdReadbackGBContext body; |
378 | SVGA3dCmdReadbackGBContext body; |
337 | } *cmd1; |
379 | } *cmd1; |
338 | struct { |
380 | struct { |
339 | SVGA3dCmdHeader header; |
381 | SVGA3dCmdHeader header; |
340 | SVGA3dCmdBindGBContext body; |
382 | SVGA3dCmdBindGBContext body; |
341 | } *cmd2; |
383 | } *cmd2; |
342 | uint32_t submit_size; |
384 | uint32_t submit_size; |
343 | uint8_t *cmd; |
385 | uint8_t *cmd; |
344 | 386 | ||
345 | 387 | ||
346 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
388 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
347 | 389 | ||
348 | mutex_lock(&dev_priv->binding_mutex); |
390 | mutex_lock(&dev_priv->binding_mutex); |
349 | vmw_context_binding_state_scrub(&uctx->cbs); |
391 | vmw_binding_state_scrub(uctx->cbs); |
350 | 392 | ||
351 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); |
393 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); |
352 | 394 | ||
353 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
395 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
354 | if (unlikely(cmd == NULL)) { |
396 | if (unlikely(cmd == NULL)) { |
355 | DRM_ERROR("Failed reserving FIFO space for context " |
397 | DRM_ERROR("Failed reserving FIFO space for context " |
356 | "unbinding.\n"); |
398 | "unbinding.\n"); |
357 | mutex_unlock(&dev_priv->binding_mutex); |
399 | mutex_unlock(&dev_priv->binding_mutex); |
358 | return -ENOMEM; |
400 | return -ENOMEM; |
359 | } |
401 | } |
360 | 402 | ||
361 | cmd2 = (void *) cmd; |
403 | cmd2 = (void *) cmd; |
362 | if (readback) { |
404 | if (readback) { |
363 | cmd1 = (void *) cmd; |
405 | cmd1 = (void *) cmd; |
364 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; |
406 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; |
365 | cmd1->header.size = sizeof(cmd1->body); |
407 | cmd1->header.size = sizeof(cmd1->body); |
366 | cmd1->body.cid = res->id; |
408 | cmd1->body.cid = res->id; |
367 | cmd2 = (void *) (&cmd1[1]); |
409 | cmd2 = (void *) (&cmd1[1]); |
368 | } |
410 | } |
369 | cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
411 | cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
370 | cmd2->header.size = sizeof(cmd2->body); |
412 | cmd2->header.size = sizeof(cmd2->body); |
371 | cmd2->body.cid = res->id; |
413 | cmd2->body.cid = res->id; |
372 | cmd2->body.mobid = SVGA3D_INVALID_ID; |
414 | cmd2->body.mobid = SVGA3D_INVALID_ID; |
373 | 415 | ||
374 | vmw_fifo_commit(dev_priv, submit_size); |
416 | vmw_fifo_commit(dev_priv, submit_size); |
375 | mutex_unlock(&dev_priv->binding_mutex); |
417 | mutex_unlock(&dev_priv->binding_mutex); |
376 | 418 | ||
377 | /* |
419 | /* |
378 | * Create a fence object and fence the backup buffer. |
420 | * Create a fence object and fence the backup buffer. |
379 | */ |
421 | */ |
380 | 422 | ||
381 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, |
423 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, |
382 | &fence, NULL); |
424 | &fence, NULL); |
383 | 425 | ||
384 | vmw_fence_single_bo(bo, fence); |
426 | vmw_fence_single_bo(bo, fence); |
385 | 427 | ||
386 | if (likely(fence != NULL)) |
428 | if (likely(fence != NULL)) |
387 | vmw_fence_obj_unreference(&fence); |
429 | vmw_fence_obj_unreference(&fence); |
388 | 430 | ||
389 | return 0; |
431 | return 0; |
390 | } |
432 | } |
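vmw_gb_context_unbind() sizes one FIFO reservation for either one or two commands and, when readback is requested, lays the READBACK command down immediately before the BIND-to-invalid command. A sketch of that variable-size, back-to-back layout, reusing invented stand-in types (the ids and structs below are not the real SVGA definitions):

```c
#include <stdint.h>
#include <stdio.h>

struct cmd_header { uint32_t id; uint32_t size; };
struct readback_cmd { struct cmd_header header; uint32_t cid; };
struct bind_cmd     { struct cmd_header header; uint32_t cid; uint32_t mobid; };

static uint8_t ring[256];

int main(void)
{
	int readback = 1;
	uint32_t submit_size = sizeof(struct bind_cmd) +
			       (readback ? sizeof(struct readback_cmd) : 0);
	uint8_t *cmd = ring;		/* stands in for a reserve call */
	struct bind_cmd *cmd2 = (void *)cmd;

	if (readback) {
		struct readback_cmd *cmd1 = (void *)cmd;

		cmd1->header.id = 1;	/* READBACK before unbinding */
		cmd1->header.size = sizeof(cmd1->cid);
		cmd1->cid = 3;
		cmd2 = (void *)(cmd1 + 1);	/* second command follows */
	}
	cmd2->header.id = 2;		/* BIND to an invalid mob id */
	cmd2->header.size = sizeof(cmd2->cid) + sizeof(cmd2->mobid);
	cmd2->cid = 3;
	cmd2->mobid = ~0u;

	printf("submitted %u bytes\n", submit_size);
	return 0;
}
```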
391 | 433 | ||
392 | static int vmw_gb_context_destroy(struct vmw_resource *res) |
434 | static int vmw_gb_context_destroy(struct vmw_resource *res) |
393 | { |
435 | { |
394 | struct vmw_private *dev_priv = res->dev_priv; |
436 | struct vmw_private *dev_priv = res->dev_priv; |
395 | struct { |
437 | struct { |
396 | SVGA3dCmdHeader header; |
438 | SVGA3dCmdHeader header; |
397 | SVGA3dCmdDestroyGBContext body; |
439 | SVGA3dCmdDestroyGBContext body; |
398 | } *cmd; |
440 | } *cmd; |
399 | 441 | ||
400 | if (likely(res->id == -1)) |
442 | if (likely(res->id == -1)) |
401 | return 0; |
443 | return 0; |
402 | 444 | ||
403 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
445 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
404 | if (unlikely(cmd == NULL)) { |
446 | if (unlikely(cmd == NULL)) { |
405 | DRM_ERROR("Failed reserving FIFO space for context " |
447 | DRM_ERROR("Failed reserving FIFO space for context " |
406 | "destruction.\n"); |
448 | "destruction.\n"); |
407 | return -ENOMEM; |
449 | return -ENOMEM; |
408 | } |
450 | } |
409 | 451 | ||
410 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; |
452 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; |
411 | cmd->header.size = sizeof(cmd->body); |
453 | cmd->header.size = sizeof(cmd->body); |
412 | cmd->body.cid = res->id; |
454 | cmd->body.cid = res->id; |
413 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
455 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
414 | if (dev_priv->query_cid == res->id) |
456 | if (dev_priv->query_cid == res->id) |
415 | dev_priv->query_cid_valid = false; |
457 | dev_priv->query_cid_valid = false; |
416 | vmw_resource_release_id(res); |
458 | vmw_resource_release_id(res); |
417 | vmw_3d_resource_dec(dev_priv, false); |
459 | vmw_fifo_resource_dec(dev_priv); |
- | 460 | ||
- | 461 | return 0; |
|
- | 462 | } |
|
- | 463 | ||
- | 464 | /* |
|
- | 465 | * DX context. |
|
- | 466 | */ |
|
- | 467 | ||
- | 468 | static int vmw_dx_context_create(struct vmw_resource *res) |
|
- | 469 | { |
|
- | 470 | struct vmw_private *dev_priv = res->dev_priv; |
|
- | 471 | int ret; |
|
- | 472 | struct { |
|
- | 473 | SVGA3dCmdHeader header; |
|
- | 474 | SVGA3dCmdDXDefineContext body; |
|
- | 475 | } *cmd; |
|
- | 476 | ||
- | 477 | if (likely(res->id != -1)) |
|
- | 478 | return 0; |
|
- | 479 | ||
- | 480 | ret = vmw_resource_alloc_id(res); |
|
- | 481 | if (unlikely(ret != 0)) { |
|
- | 482 | DRM_ERROR("Failed to allocate a context id.\n"); |
|
- | 483 | goto out_no_id; |
|
- | 484 | } |
|
- | 485 | ||
- | 486 | if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) { |
|
- | 487 | ret = -EBUSY; |
|
- | 488 | goto out_no_fifo; |
|
- | 489 | } |
|
- | 490 | ||
- | 491 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
|
- | 492 | if (unlikely(cmd == NULL)) { |
|
- | 493 | DRM_ERROR("Failed reserving FIFO space for context " |
|
- | 494 | "creation.\n"); |
|
- | 495 | ret = -ENOMEM; |
|
- | 496 | goto out_no_fifo; |
|
- | 497 | } |
|
- | 498 | ||
- | 499 | cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT; |
|
- | 500 | cmd->header.size = sizeof(cmd->body); |
|
- | 501 | cmd->body.cid = res->id; |
|
- | 502 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
|
- | 503 | vmw_fifo_resource_inc(dev_priv); |
|
- | 504 | ||
- | 505 | return 0; |
|
- | 506 | ||
- | 507 | out_no_fifo: |
|
- | 508 | vmw_resource_release_id(res); |
|
- | 509 | out_no_id: |
|
- | 510 | return ret; |
|
- | 511 | } |
|
- | 512 | ||
- | 513 | static int vmw_dx_context_bind(struct vmw_resource *res, |
|
- | 514 | struct ttm_validate_buffer *val_buf) |
|
- | 515 | { |
|
- | 516 | struct vmw_private *dev_priv = res->dev_priv; |
|
- | 517 | struct { |
|
- | 518 | SVGA3dCmdHeader header; |
|
- | 519 | SVGA3dCmdDXBindContext body; |
|
- | 520 | } *cmd; |
|
- | 521 | struct ttm_buffer_object *bo = val_buf->bo; |
|
- | 522 | ||
- | 523 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
|
- | 524 | ||
- | 525 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
|
- | 526 | if (unlikely(cmd == NULL)) { |
|
- | 527 | DRM_ERROR("Failed reserving FIFO space for context " |
|
- | 528 | "binding.\n"); |
|
- | 529 | return -ENOMEM; |
|
- | 530 | } |
|
- | 531 | ||
- | 532 | cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; |
|
- | 533 | cmd->header.size = sizeof(cmd->body); |
|
- | 534 | cmd->body.cid = res->id; |
|
- | 535 | cmd->body.mobid = bo->mem.start; |
|
- | 536 | cmd->body.validContents = res->backup_dirty; |
|
- | 537 | res->backup_dirty = false; |
|
- | 538 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
|
- | 539 | ||
- | 540 | ||
- | 541 | return 0; |
|
- | 542 | } |
|
- | 543 | ||
- | 544 | /** |
|
- | 545 | * vmw_dx_context_scrub_cotables - Scrub all bindings and |
|
- | 546 | * cotables from a context |
|
- | 547 | * |
|
- | 548 | * @ctx: Pointer to the context resource |
|
- | 549 | * @readback: Whether to save the cotable contents on scrubbing. |
|
- | 550 | * |
|
- | 551 | * COtables must be unbound before their context, but unbinding requires |
|
- | 552 | * the backup buffer being reserved, whereas scrubbing does not. |
|
- | 553 | * This function scrubs all cotables of a context, potentially reading back |
|
- | 554 | * the contents into their backup buffers. However, scrubbing cotables |
|
- | 555 | * also makes the device context invalid, so scrub all bindings first so |
|
- | 556 | * that doesn't have to be done later with an invalid context. |
|
- | 557 | */ |
|
- | 558 | void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, |
|
- | 559 | bool readback) |
|
- | 560 | { |
|
- | 561 | struct vmw_user_context *uctx = |
|
- | 562 | container_of(ctx, struct vmw_user_context, res); |
|
- | 563 | int i; |
|
- | 564 | ||
- | 565 | vmw_binding_state_scrub(uctx->cbs); |
|
- | 566 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { |
|
- | 567 | struct vmw_resource *res; |
|
- | 568 | ||
- | 569 | /* Avoid racing with ongoing cotable destruction. */ |
|
- | 570 | spin_lock(&uctx->cotable_lock); |
|
- | 571 | res = uctx->cotables[vmw_cotable_scrub_order[i]]; |
|
- | 572 | if (res) |
|
- | 573 | res = vmw_resource_reference_unless_doomed(res); |
|
- | 574 | spin_unlock(&uctx->cotable_lock); |
|
- | 575 | if (!res) |
|
- | 576 | continue; |
|
- | 577 | ||
- | 578 | WARN_ON(vmw_cotable_scrub(res, readback)); |
|
- | 579 | vmw_resource_unreference(&res); |
|
- | 580 | } |
|
- | 581 | } |
|
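vmw_dx_context_scrub_cotables() above walks the cotables through vmw_cotable_scrub_order[] rather than by raw index, because inter-table dependencies dictate the scrub order. A tiny sketch of the order-table indirection (the table contents and names below are made up; only the pattern matters):

```c
#include <stdio.h>

#define NUM_TABLES 4

static const int scrub_order[NUM_TABLES] = { 3, 1, 0, 2 };
static const char *table_name[NUM_TABLES] = {
	"rtview", "dsview", "srview", "dxshader"
};

int main(void)
{
	int i;

	/* Dependencies between tables dictate the order, so the loop
	 * index is mapped through the order table instead of used
	 * directly. */
	for (i = 0; i < NUM_TABLES; ++i)
		printf("scrubbing %s\n", table_name[scrub_order[i]]);
	return 0;
}
```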
- | 582 | ||
- | 583 | static int vmw_dx_context_unbind(struct vmw_resource *res, |
|
- | 584 | bool readback, |
|
- | 585 | struct ttm_validate_buffer *val_buf) |
|
- | 586 | { |
|
- | 587 | struct vmw_private *dev_priv = res->dev_priv; |
|
- | 588 | struct ttm_buffer_object *bo = val_buf->bo; |
|
- | 589 | struct vmw_fence_obj *fence; |
|
- | 590 | struct vmw_user_context *uctx = |
|
- | 591 | container_of(res, struct vmw_user_context, res); |
|
- | 592 | ||
- | 593 | struct { |
|
- | 594 | SVGA3dCmdHeader header; |
|
- | 595 | SVGA3dCmdDXReadbackContext body; |
|
- | 596 | } *cmd1; |
|
- | 597 | struct { |
|
- | 598 | SVGA3dCmdHeader header; |
|
- | 599 | SVGA3dCmdDXBindContext body; |
|
- | 600 | } *cmd2; |
|
- | 601 | uint32_t submit_size; |
|
- | 602 | uint8_t *cmd; |
|
- | 603 | ||
- | 604 | ||
- | 605 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
|
- | 606 | ||
- | 607 | mutex_lock(&dev_priv->binding_mutex); |
|
- | 608 | vmw_dx_context_scrub_cotables(res, readback); |
|
- | 609 | ||
- | 610 | if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx && |
|
- | 611 | readback) { |
|
- | 612 | WARN_ON(uctx->dx_query_mob->dx_query_ctx != res); |
|
- | 613 | if (vmw_query_readback_all(uctx->dx_query_mob)) |
|
- | 614 | DRM_ERROR("Failed to read back query states\n"); |
|
- | 615 | } |
|
- | 616 | ||
- | 617 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); |
|
- | 618 | ||
- | 619 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
|
- | 620 | if (unlikely(cmd == NULL)) { |
|
- | 621 | DRM_ERROR("Failed reserving FIFO space for context " |
|
- | 622 | "unbinding.\n"); |
|
- | 623 | mutex_unlock(&dev_priv->binding_mutex); |
|
- | 624 | return -ENOMEM; |
|
- | 625 | } |
|
- | 626 | ||
- | 627 | cmd2 = (void *) cmd; |
|
- | 628 | if (readback) { |
|
- | 629 | cmd1 = (void *) cmd; |
|
- | 630 | cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT; |
|
- | 631 | cmd1->header.size = sizeof(cmd1->body); |
|
- | 632 | cmd1->body.cid = res->id; |
|
- | 633 | cmd2 = (void *) (&cmd1[1]); |
|
- | 634 | } |
|
- | 635 | cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; |
|
- | 636 | cmd2->header.size = sizeof(cmd2->body); |
|
- | 637 | cmd2->body.cid = res->id; |
|
- | 638 | cmd2->body.mobid = SVGA3D_INVALID_ID; |
|
- | 639 | ||
- | 640 | vmw_fifo_commit(dev_priv, submit_size); |
|
- | 641 | mutex_unlock(&dev_priv->binding_mutex); |
|
- | 642 | ||
- | 643 | /* |
|
- | 644 | * Create a fence object and fence the backup buffer. |
|
- | 645 | */ |
|
- | 646 | ||
- | 647 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, |
|
- | 648 | &fence, NULL); |
|
- | 649 | ||
- | 650 | vmw_fence_single_bo(bo, fence); |
|
- | 651 | ||
- | 652 | if (likely(fence != NULL)) |
|
- | 653 | vmw_fence_obj_unreference(&fence); |
|
- | 654 | ||
- | 655 | return 0; |
|
- | 656 | } |
|
- | 657 | ||
- | 658 | static int vmw_dx_context_destroy(struct vmw_resource *res) |
|
- | 659 | { |
|
- | 660 | struct vmw_private *dev_priv = res->dev_priv; |
|
- | 661 | struct { |
|
- | 662 | SVGA3dCmdHeader header; |
|
- | 663 | SVGA3dCmdDXDestroyContext body; |
|
- | 664 | } *cmd; |
|
- | 665 | ||
- | 666 | if (likely(res->id == -1)) |
|
- | 667 | return 0; |
|
- | 668 | ||
- | 669 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
|
- | 670 | if (unlikely(cmd == NULL)) { |
|
- | 671 | DRM_ERROR("Failed reserving FIFO space for context " |
|
- | 672 | "destruction.\n"); |
|
- | 673 | return -ENOMEM; |
|
- | 674 | } |
|
- | 675 | ||
- | 676 | cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT; |
|
- | 677 | cmd->header.size = sizeof(cmd->body); |
|
- | 678 | cmd->body.cid = res->id; |
|
- | 679 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
|
- | 680 | if (dev_priv->query_cid == res->id) |
|
- | 681 | dev_priv->query_cid_valid = false; |
|
- | 682 | vmw_resource_release_id(res); |
|
- | 683 | vmw_fifo_resource_dec(dev_priv); |
|
418 | 684 | ||
419 | return 0; |
685 | return 0; |
420 | } |
686 | } |
421 | 687 | ||
422 | /** |
688 | /** |
423 | * User-space context management: |
689 | * User-space context management: |
424 | */ |
690 | */ |
425 | 691 | ||
426 | static struct vmw_resource * |
692 | static struct vmw_resource * |
427 | vmw_user_context_base_to_res(struct ttm_base_object *base) |
693 | vmw_user_context_base_to_res(struct ttm_base_object *base) |
428 | { |
694 | { |
429 | return &(container_of(base, struct vmw_user_context, base)->res); |
695 | return &(container_of(base, struct vmw_user_context, base)->res); |
430 | } |
696 | } |
431 | 697 | ||
432 | static void vmw_user_context_free(struct vmw_resource *res) |
698 | static void vmw_user_context_free(struct vmw_resource *res) |
433 | { |
699 | { |
434 | struct vmw_user_context *ctx = |
700 | struct vmw_user_context *ctx = |
435 | container_of(res, struct vmw_user_context, res); |
701 | container_of(res, struct vmw_user_context, res); |
436 | struct vmw_private *dev_priv = res->dev_priv; |
702 | struct vmw_private *dev_priv = res->dev_priv; |
- | 703 | ||
- | 704 | if (ctx->cbs) |
|
- | 705 | vmw_binding_state_free(ctx->cbs); |
|
- | 706 | ||
- | 707 | (void) vmw_context_bind_dx_query(res, NULL); |
|
437 | 708 | ||
438 | // ttm_base_object_kfree(ctx, base); |
709 | ttm_base_object_kfree(ctx, base); |
439 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
710 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
440 | vmw_user_context_size); |
711 | vmw_user_context_size); |
441 | } |
712 | } |
442 | 713 | ||
443 | /** |
714 | /** |
444 | * This function is called when user space has no more references on the |
715 | * This function is called when user space has no more references on the |
445 | * base object. It releases the base-object's reference on the resource object. |
716 | * base object. It releases the base-object's reference on the resource object. |
446 | */ |
717 | */ |
447 | 718 | ||
448 | static void vmw_user_context_base_release(struct ttm_base_object **p_base) |
719 | static void vmw_user_context_base_release(struct ttm_base_object **p_base) |
449 | { |
720 | { |
450 | struct ttm_base_object *base = *p_base; |
721 | struct ttm_base_object *base = *p_base; |
451 | struct vmw_user_context *ctx = |
722 | struct vmw_user_context *ctx = |
452 | container_of(base, struct vmw_user_context, base); |
723 | container_of(base, struct vmw_user_context, base); |
453 | struct vmw_resource *res = &ctx->res; |
724 | struct vmw_resource *res = &ctx->res; |
454 | 725 | ||
455 | *p_base = NULL; |
726 | *p_base = NULL; |
456 | vmw_resource_unreference(&res); |
727 | vmw_resource_unreference(&res); |
457 | } |
728 | } |
458 | 729 | ||
459 | #if 0 |
730 | #if 0 |
460 | int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
731 | int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
461 | struct drm_file *file_priv) |
732 | struct drm_file *file_priv) |
462 | { |
733 | { |
463 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; |
734 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; |
464 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
735 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
465 | 736 | ||
466 | return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); |
737 | return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); |
467 | } |
738 | } |
468 | 739 | ||
469 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
740 | static int vmw_context_define(struct drm_device *dev, void *data, |
470 | struct drm_file *file_priv) |
741 | struct drm_file *file_priv, bool dx) |
471 | { |
742 | { |
472 | struct vmw_private *dev_priv = vmw_priv(dev); |
743 | struct vmw_private *dev_priv = vmw_priv(dev); |
473 | struct vmw_user_context *ctx; |
744 | struct vmw_user_context *ctx; |
474 | struct vmw_resource *res; |
745 | struct vmw_resource *res; |
475 | struct vmw_resource *tmp; |
746 | struct vmw_resource *tmp; |
476 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; |
747 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; |
477 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
748 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
478 | int ret; |
749 | int ret; |
- |
750 | |
- |
751 | if (!dev_priv->has_dx && dx) { |
- |
752 | DRM_ERROR("DX contexts not supported by device.\n"); |
- |
753 | return -EINVAL; |
479 | |
754 | } |
480 | |
755 | |
481 | /* |
756 | /* |
482 | * Approximate idr memory usage with 128 bytes. It will be limited |
757 | * Approximate idr memory usage with 128 bytes. It will be limited |
483 | * by maximum number of contexts anyway. |
758 | * by maximum number of contexts anyway. |
484 | */ |
759 | */ |
485 | 760 | ||
486 | if (unlikely(vmw_user_context_size == 0)) |
761 | if (unlikely(vmw_user_context_size == 0)) |
487 | vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 + |
762 | vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 + |
488 | ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0); |
763 | ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0); |
489 | 764 | ||
490 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
765 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
491 | if (unlikely(ret != 0)) |
766 | if (unlikely(ret != 0)) |
492 | return ret; |
767 | return ret; |
493 | 768 | ||
494 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), |
769 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), |
495 | vmw_user_context_size, |
770 | vmw_user_context_size, |
496 | false, true); |
771 | false, true); |
497 | if (unlikely(ret != 0)) { |
772 | if (unlikely(ret != 0)) { |
498 | if (ret != -ERESTARTSYS) |
773 | if (ret != -ERESTARTSYS) |
499 | DRM_ERROR("Out of graphics memory for context" |
774 | DRM_ERROR("Out of graphics memory for context" |
500 | " creation.\n"); |
775 | " creation.\n"); |
501 | goto out_unlock; |
776 | goto out_unlock; |
502 | } |
777 | } |
503 | 778 | ||
504 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
779 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
505 | if (unlikely(ctx == NULL)) { |
780 | if (unlikely(ctx == NULL)) { |
506 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
781 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
507 | vmw_user_context_size); |
782 | vmw_user_context_size); |
508 | ret = -ENOMEM; |
783 | ret = -ENOMEM; |
509 | goto out_unlock; |
784 | goto out_unlock; |
510 | } |
785 | } |
511 | 786 | ||
512 | res = &ctx->res; |
787 | res = &ctx->res; |
513 | ctx->base.shareable = false; |
788 | ctx->base.shareable = false; |
514 | ctx->base.tfile = NULL; |
789 | ctx->base.tfile = NULL; |
515 | 790 | ||
516 | /* |
791 | /* |
517 | * From here on, the destructor takes over resource freeing. |
792 | * From here on, the destructor takes over resource freeing. |
518 | */ |
793 | */ |
519 | 794 | ||
520 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free); |
795 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx); |
521 | if (unlikely(ret != 0)) |
796 | if (unlikely(ret != 0)) |
522 | goto out_unlock; |
797 | goto out_unlock; |
523 | 798 | ||
524 | tmp = vmw_resource_reference(&ctx->res); |
799 | tmp = vmw_resource_reference(&ctx->res); |
525 | ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, |
800 | ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, |
526 | &vmw_user_context_base_release, NULL); |
801 | &vmw_user_context_base_release, NULL); |
527 | 802 | ||
528 | if (unlikely(ret != 0)) { |
803 | if (unlikely(ret != 0)) { |
529 | vmw_resource_unreference(&tmp); |
804 | vmw_resource_unreference(&tmp); |
530 | goto out_err; |
805 | goto out_err; |
531 | } |
806 | } |
532 | 807 | ||
533 | arg->cid = ctx->base.hash.key; |
808 | arg->cid = ctx->base.hash.key; |
534 | out_err: |
809 | out_err: |
535 | vmw_resource_unreference(&res); |
810 | vmw_resource_unreference(&res); |
536 | out_unlock: |
811 | out_unlock: |
537 | ttm_read_unlock(&dev_priv->reservation_sem); |
812 | ttm_read_unlock(&dev_priv->reservation_sem); |
538 | return ret; |
813 | return ret; |
539 | - | ||
540 | } |
814 | } |
541 | #endif |
815 | #endif |
542 | 816 | ||
543 | /** |
817 | /** |
544 | * vmw_context_scrub_shader - scrub a shader binding from a context. |
- | |
545 | * |
- | |
546 | * @bi: single binding information. |
- | |
547 | * @rebind: Whether to issue a bind instead of scrub command. |
- | |
548 | */ |
- | |
549 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) |
- | |
550 | { |
- | |
551 | struct vmw_private *dev_priv = bi->ctx->dev_priv; |
- | |
552 | struct { |
- | |
553 | SVGA3dCmdHeader header; |
- | |
554 | SVGA3dCmdSetShader body; |
- | |
555 | } *cmd; |
- | |
556 | - | ||
557 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
- | |
558 | if (unlikely(cmd == NULL)) { |
- | |
559 | DRM_ERROR("Failed reserving FIFO space for shader " |
- | |
560 | "unbinding.\n"); |
- | |
561 | return -ENOMEM; |
- | |
562 | } |
- | |
563 | - | ||
564 | cmd->header.id = SVGA_3D_CMD_SET_SHADER; |
- | |
565 | cmd->header.size = sizeof(cmd->body); |
- | |
566 | cmd->body.cid = bi->ctx->id; |
- | |
567 | cmd->body.type = bi->i1.shader_type; |
- | |
568 | cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
- | |
569 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
- | |
570 | - | ||
571 | return 0; |
- | |
572 | } |
- | |
573 | - | ||
574 | /** |
- | |
575 | * vmw_context_scrub_render_target - scrub a render target binding |
- | |
576 | * from a context. |
- | |
577 | * |
- | |
578 | * @bi: single binding information. |
- | |
579 | * @rebind: Whether to issue a bind instead of scrub command. |
- | |
580 | */ |
- | |
581 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, |
- | |
582 | bool rebind) |
- | |
583 | { |
- | |
584 | struct vmw_private *dev_priv = bi->ctx->dev_priv; |
- | |
585 | struct { |
- | |
586 | SVGA3dCmdHeader header; |
- | |
587 | SVGA3dCmdSetRenderTarget body; |
- | |
588 | } *cmd; |
- | |
589 | - | ||
590 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
- | |
591 | if (unlikely(cmd == NULL)) { |
- | |
592 | DRM_ERROR("Failed reserving FIFO space for render target " |
- | |
593 | "unbinding.\n"); |
- | |
594 | return -ENOMEM; |
- | |
595 | } |
- | |
596 | - | ||
597 | cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; |
- | |
598 | cmd->header.size = sizeof(cmd->body); |
- | |
599 | cmd->body.cid = bi->ctx->id; |
- | |
600 | cmd->body.type = bi->i1.rt_type; |
- | |
601 | cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
- | |
602 | cmd->body.target.face = 0; |
- | |
603 | cmd->body.target.mipmap = 0; |
- | |
604 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
- | |
605 | - | ||
606 | return 0; |
- | |
607 | } |
- | |
608 | - | ||
609 | /** |
- | |
610 | * vmw_context_scrub_texture - scrub a texture binding from a context. |
- | |
611 | * |
- | |
612 | * @bi: single binding information. |
- | |
613 | * @rebind: Whether to issue a bind instead of scrub command. |
- | |
614 | * |
- | |
615 | * TODO: Possibly complement this function with a function that takes |
- | |
616 | * a list of texture bindings and combines them to a single command. |
- | |
617 | */ |
- | |
618 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, |
- | |
619 | bool rebind) |
- | |
620 | { |
- | |
621 | struct vmw_private *dev_priv = bi->ctx->dev_priv; |
- | |
622 | struct { |
- | |
623 | SVGA3dCmdHeader header; |
- | |
624 | struct { |
- | |
625 | SVGA3dCmdSetTextureState c; |
- | |
626 | SVGA3dTextureState s1; |
- | |
627 | } body; |
- | |
628 | } *cmd; |
- | |
629 | - | ||
630 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
- | |
631 | if (unlikely(cmd == NULL)) { |
- | |
632 | DRM_ERROR("Failed reserving FIFO space for texture " |
- | |
633 | "unbinding.\n"); |
- | |
634 | return -ENOMEM; |
- | |
635 | } |
- | |
636 | - | ||
637 | - | ||
638 | cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; |
- | |
639 | cmd->header.size = sizeof(cmd->body); |
- | |
640 | cmd->body.c.cid = bi->ctx->id; |
- | |
641 | cmd->body.s1.stage = bi->i1.texture_stage; |
- | |
642 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; |
- | |
643 | cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
- | |
644 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
- | |
645 | - | ||
646 | return 0; |
- | |
647 | } |
- | |
648 | - | ||
649 | /** |
- | |
650 | * vmw_context_binding_drop: Stop tracking a context binding |
- | |
651 | * |
- | |
652 | * @cb: Pointer to binding tracker storage. |
- | |
653 | * |
- | |
654 | * Stops tracking a context binding, and re-initializes its storage. |
- | |
655 | * Typically used when the context binding is replaced with a binding to |
- | |
656 | * another (or the same, for that matter) resource. |
- | |
657 | */ |
- | |
658 | static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) |
- | |
659 | { |
- | |
660 | list_del(&cb->ctx_list); |
- | |
661 | if (!list_empty(&cb->res_list)) |
- | |
662 | list_del(&cb->res_list); |
- | |
663 | cb->bi.ctx = NULL; |
- | |
664 | } |
- | |
665 | - | ||
666 | /** |
- | |
667 | * vmw_context_binding_add: Start tracking a context binding |
818 | * vmw_context_binding_list - Return a list of context bindings |
668 | * |
- | |
669 | * @cbs: Pointer to the context binding state tracker. |
- | |
670 | * @bi: Information about the binding to track. |
- | |
671 | * |
- | |
672 | * Performs basic checks on the binding to make sure arguments are within |
- | |
673 | * bounds and then starts tracking the binding in the context binding |
- | |
674 | * state structure @cbs. |
- | |
675 | */ |
- | |
676 | int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, |
- | |
677 | const struct vmw_ctx_bindinfo *bi) |
- | |
678 | { |
- | |
679 | struct vmw_ctx_binding *loc; |
- | |
680 | - | ||
681 | switch (bi->bt) { |
- | |
682 | case vmw_ctx_binding_rt: |
- | |
683 | if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { |
- | |
684 | DRM_ERROR("Illegal render target type %u.\n", |
- | |
685 | (unsigned) bi->i1.rt_type); |
- | |
686 | return -EINVAL; |
- | |
687 | } |
- | |
688 | loc = &cbs->render_targets[bi->i1.rt_type]; |
- | |
689 | break; |
- | |
690 | case vmw_ctx_binding_tex: |
- | |
691 | if (unlikely((unsigned)bi->i1.texture_stage >= |
- | |
692 | SVGA3D_NUM_TEXTURE_UNITS)) { |
- | |
693 | DRM_ERROR("Illegal texture/sampler unit %u.\n", |
- | |
694 | (unsigned) bi->i1.texture_stage); |
- | |
695 | return -EINVAL; |
- | |
696 | } |
- | |
697 | loc = &cbs->texture_units[bi->i1.texture_stage]; |
- | |
698 | break; |
- | |
699 | case vmw_ctx_binding_shader: |
- | |
700 | if (unlikely((unsigned)bi->i1.shader_type >= |
- | |
701 | SVGA3D_SHADERTYPE_MAX)) { |
- | |
702 | DRM_ERROR("Illegal shader type %u.\n", |
- | |
703 | (unsigned) bi->i1.shader_type); |
- | |
704 | return -EINVAL; |
- | |
705 | } |
- | |
706 | loc = &cbs->shaders[bi->i1.shader_type]; |
- | |
707 | break; |
- | |
708 | default: |
- | |
709 | BUG(); |
- | |
710 | } |
- | |
711 | - | ||
712 | if (loc->bi.ctx != NULL) |
- | |
713 | vmw_context_binding_drop(loc); |
- | |
714 | - | ||
715 | loc->bi = *bi; |
- | |
716 | loc->bi.scrubbed = false; |
- | |
717 | list_add_tail(&loc->ctx_list, &cbs->list); |
- | |
718 | INIT_LIST_HEAD(&loc->res_list); |
- | |
719 | - | ||
720 | return 0; |
- | |
721 | } |
- | |
722 | - | ||
723 | /** |
- | |
724 | * vmw_context_binding_transfer: Transfer a context binding tracking entry. |
- | |
725 | * |
- | |
726 | * @cbs: Pointer to the persistent context binding state tracker. |
- | |
727 | * @bi: Information about the binding to track. |
- | |
728 | * |
- | |
729 | */ |
- | |
730 | static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, |
- | |
731 | const struct vmw_ctx_bindinfo *bi) |
- | |
732 | { |
- | |
733 | struct vmw_ctx_binding *loc; |
- | |
734 | - | ||
735 | switch (bi->bt) { |
- | |
736 | case vmw_ctx_binding_rt: |
- | |
737 | loc = &cbs->render_targets[bi->i1.rt_type]; |
- | |
738 | break; |
- | |
739 | case vmw_ctx_binding_tex: |
- | |
740 | loc = &cbs->texture_units[bi->i1.texture_stage]; |
- | |
741 | break; |
- | |
742 | case vmw_ctx_binding_shader: |
- | |
743 | loc = &cbs->shaders[bi->i1.shader_type]; |
- | |
744 | break; |
- | |
745 | default: |
- | |
746 | BUG(); |
- | |
747 | } |
- | |
748 | - | ||
749 | if (loc->bi.ctx != NULL) |
- | |
750 | vmw_context_binding_drop(loc); |
- | |
751 | - | ||
752 | if (bi->res != NULL) { |
- | |
753 | loc->bi = *bi; |
- | |
754 | list_add_tail(&loc->ctx_list, &cbs->list); |
- | |
755 | list_add_tail(&loc->res_list, &bi->res->binding_head); |
- | |
756 | } |
- | |
757 | } |
- | |
758 | - | ||
759 | /** |
- | |
760 | * vmw_context_binding_kill - Kill a binding on the device |
- | |
761 | * and stop tracking it. |
- | |
762 | * |
- | |
763 | * @cb: Pointer to binding tracker storage. |
- | |
764 | * |
- | |
765 | * Emits FIFO commands to scrub a binding represented by @cb. |
- | |
766 | * Then stops tracking the binding and re-initializes its storage. |
- | |
767 | */ |
- | |
768 | static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) |
- | |
769 | { |
- | |
770 | if (!cb->bi.scrubbed) { |
- | |
771 | (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); |
- | |
772 | cb->bi.scrubbed = true; |
- | |
773 | } |
- | |
774 | vmw_context_binding_drop(cb); |
- | |
775 | } |
- | |
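The removed vmw_context_binding_kill() above guarded the scrub command with a scrubbed flag so repeated kill/scrub calls stayed idempotent, an idiom the replacement vmwgfx_binding code keeps. A minimal sketch (binding/emit_scrub are invented names):

```c
#include <stdbool.h>
#include <stdio.h>

struct binding { bool scrubbed; };

static void emit_scrub(struct binding *b)
{
	printf("emitting scrub command\n");	/* device traffic */
	(void)b;
}

static void binding_kill(struct binding *b)
{
	if (!b->scrubbed) {	/* guard keeps the operation idempotent */
		emit_scrub(b);
		b->scrubbed = true;
	}
	/* ...then drop tracking state (omitted here) */
}

int main(void)
{
	struct binding b = { .scrubbed = false };

	binding_kill(&b);
	binding_kill(&b);	/* second call emits nothing */
	return 0;
}
```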
776 | - | ||
/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

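/*
 * Editor's note (illustrative): the difference between "scrub" and
 * "kill" is whether the tracking entry survives. Scrubbing only emits
 * the device-side unbind (vmw_scrub_funcs[bt](bi, false)) and marks
 * the entry scrubbed so it can be re-emitted later; killing scrubs
 * and then drops the entry from both tracking lists. A hypothetical,
 * condensed caller contrasting the two:
 */
static inline void example_scrub_or_kill(struct vmw_ctx_binding_state *cbs,
					 bool destroying)
{
	if (destroying)
		vmw_context_binding_state_kill(cbs);	/* Unbind and forget. */
	else
		vmw_context_binding_state_scrub(cbs);	/* Unbind, keep tracking. */
}
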
/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: List head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: List head of resource binding list
 *
 * Scrubs all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

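/*
 * Editor's sketch: how a resource's teardown paths would use the two
 * res_list helpers above. binding_head is the list that
 * vmw_context_binding_transfer() links each binding onto; the
 * surrounding logic here is condensed and hypothetical.
 */
static inline void example_resource_teardown(struct vmw_resource *res,
					     bool destroy)
{
	/* binding_head links every context binding referencing @res. */
	if (destroy)
		vmw_context_binding_res_list_kill(&res->binding_head);
	else
		vmw_context_binding_res_list_scrub(&res->binding_head);
}
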
/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL ||
			    entry->bi.res->id == SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

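/*
 * Editor's sketch: scrub and rebind are inverses across a context
 * swap-out/swap-in cycle. The helper below is hypothetical and
 * condensed; the real callers live in the eviction and execbuf paths.
 */
static inline int example_context_swap(struct vmw_resource *ctx, bool swap_out)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	if (swap_out) {
		/* Unbind on the device but keep the tracking entries. */
		vmw_context_binding_state_scrub(&uctx->cbs);
		return 0;
	}

	/* Re-emit every binding that was scrubbed while swapped out. */
	return vmw_context_rebind_all(ctx);
}
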
/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}

struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

/*
 * Rev 6296 side of this difference area. The rev 5078 binding-tracking
 * machinery above was moved out of this file (into the vmwgfx_binding
 * code, judging by the vmw_binding_state_list() call below); the
 * context code now keeps thin accessors plus DX query MOB handling.
 */

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return vmw_resource_reference
		(container_of(ctx, struct vmw_user_context, res)->
		 cotables[cotable_type]);
}

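/*
 * Editor's sketch: typical use of vmw_context_cotable(). The call
 * returns a counted reference on success, so the caller must drop it
 * with vmw_resource_unreference() when done; errors follow the
 * standard ERR_PTR convention. The SVGA_COTABLE_DXSHADER index is an
 * assumed example.
 */
static inline int example_lookup_shader_cotable(struct vmw_resource *ctx)
{
	struct vmw_resource *cotable =
		vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);

	if (IS_ERR(cotable))
		return PTR_ERR(cotable);

	/* ... use the cotable ... */
	vmw_resource_unreference(&cotable);
	return 0;
}
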
/**
 * vmw_context_binding_state - Return a pointer to a context binding
 * state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query - Set the query MOB for the context. If
 * @mob is NULL, this function removes the association between the MOB
 * and the context. This function assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: A reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter, 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_dma_buffer *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_dmabuf_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries. */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_dmabuf_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query MOB
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}
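
/*
 * Editor's sketch: the DX query MOB round trip. A query command binds
 * the MOB to the context (with binding_mutex held, per the comment on
 * vmw_context_bind_dx_query()); readback code later borrows the
 * non-counted pointer. The locking shown is indicative only.
 */
static inline int example_dx_query_roundtrip(struct vmw_private *dev_priv,
					     struct vmw_resource *ctx,
					     struct vmw_dma_buffer *mob)
{
	struct vmw_dma_buffer *cur;
	int ret;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_context_bind_dx_query(ctx, mob);	/* 0 or -EINVAL. */
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		return ret;

	/* Later: fetch the MOB pointer without taking a reference. */
	mutex_lock(&dev_priv->binding_mutex);
	cur = vmw_context_get_dx_query_mob(ctx);
	mutex_unlock(&dev_priv->binding_mutex);

	return cur ? 0 : -EINVAL;
}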