Subversion Repositories Kolibri OS


Diff: Rev 5078 → Rev 6296 (vmwgfx context management code)
Line 1... Line 1...
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including

Line 25... Line 25...
  *
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
-	struct vmw_ctx_binding_state cbs;
+	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
+	struct vmw_dma_buffer *dx_query_mob;
 };
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
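Note: from Rev 5078 to Rev 6296, struct vmw_user_context stops embedding the binding tracker and instead holds an opaque struct vmw_ctx_binding_state * managed through the vmw_binding_* API from the new vmwgfx_binding.h include, plus new DX state: an array of cotable resources guarded by cotable_lock and an optional query MOB. A minimal lifecycle sketch assembled from the hunks on this page (not a complete function; the call sites named in the comments are the ones shown further down):

	/* Sketch: setup/teardown of the new fields in this revision. */
	uctx->cbs = vmw_binding_state_alloc(dev_priv);	/* vmw_gb_context_init() */
	if (IS_ERR(uctx->cbs))
		return PTR_ERR(uctx->cbs);
	spin_lock_init(&uctx->cotable_lock);
	/* ... and on destruction: */
	vmw_binding_state_kill(uctx->cbs);		/* vmw_hw_context_destroy() */
	vmw_binding_state_free(uctx->cbs);		/* vmw_user_context_free() */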
 static void vmw_user_context_free(struct vmw_resource *res);

Line 49... Line 49...
 			       struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
 
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
 	.object_type = VMW_RES_CONTEXT,

Line 91... Line 93...
 	.destroy = vmw_gb_context_destroy,
 	.bind = vmw_gb_context_bind,
 	.unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+	struct vmw_resource *res;
+	int i;
+
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[i];
+		uctx->cotables[i] = NULL;
+		spin_unlock(&uctx->cotable_lock);
+
+		if (res)
+			vmw_resource_unreference(&res);
+	}
+}
+
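Note: vmw_context_cotables_unref() snapshots and clears each cotable pointer while holding cotable_lock, but calls vmw_resource_unreference() only after dropping the lock, since the final unreference can destroy the resource and must not run under a spinlock. The reader side uses the same discipline; a hedged sketch mirroring vmw_dx_context_scrub_cotables() further down this page:

	/* Sketch: a safe cotable lookup under uctx->cotable_lock. */
	spin_lock(&uctx->cotable_lock);
	res = uctx->cotables[i];
	if (res)	/* fails if the resource is already being destroyed */
		res = vmw_resource_reference_unless_doomed(res);
	spin_unlock(&uctx->cotable_lock);
	if (res) {
		/* ... use res ... */
		vmw_resource_unreference(&res);	/* outside the lock */
	}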
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {

Line 111... Line 136...
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyContext body;
 	} *cmd;
 
 
-	if (res->func->destroy == vmw_gb_context_destroy) {
+	if (res->func->destroy == vmw_gb_context_destroy ||
+	    res->func->destroy == vmw_dx_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
 		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill(&uctx->cbs);
-		(void) vmw_gb_context_destroy(res);
+		vmw_binding_state_kill(uctx->cbs);
+		(void) res->func->destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		vmw_context_cotables_unref(uctx);
 		return;
 	}

Line 133... Line 160...
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "destruction.\n");
 		return;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       bool dx,
 			       struct vmw_resource *res,
-			       void (*res_free) (struct vmw_resource *res))
+			       void (*res_free)(struct vmw_resource *res))
 {
-	int ret;
+	int ret, i;
 	struct vmw_user_context *uctx =
 		container_of(res, struct vmw_user_context, res);
 
+	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+			    SVGA3D_CONTEXT_DATA_SIZE);
 	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_context_func);
-	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+				res_free,
+				dx ? &vmw_dx_context_func :
+				&vmw_gb_context_func);
 	if (unlikely(ret != 0))
 		goto out_err;
 
 	if (dev_priv->has_mob) {
 		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
-		if (unlikely(IS_ERR(uctx->man))) {
+		if (IS_ERR(uctx->man)) {
 			ret = PTR_ERR(uctx->man);
 			uctx->man = NULL;
 			goto out_err;
 		}
 	}
 
-	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
-	INIT_LIST_HEAD(&uctx->cbs.list);
+	uctx->cbs = vmw_binding_state_alloc(dev_priv);
+	if (IS_ERR(uctx->cbs)) {
+		ret = PTR_ERR(uctx->cbs);
+		goto out_err;
+	}
+
+	spin_lock_init(&uctx->cotable_lock);
+
+	if (dx) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+							      &uctx->res, i);
+			if (unlikely(uctx->cotables[i] == NULL)) {
+				ret = -ENOMEM;
+				goto out_cotables;
+			}
+		}
+	}
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
+
+out_cotables:
+	vmw_context_cotables_unref(uctx);
 out_err:
 	if (res_free)
 		res_free(res);
 	else
 		kfree(res);
 	return ret;
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
+			    void (*res_free)(struct vmw_resource *res),
+			    bool dx)
 {
 	int ret;

Line 213... Line 265...
 		DRM_ERROR("Fifo reserve failed.\n");
 		vmw_resource_unreference(&res);
 		return -ENOMEM;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;

Line 230... Line 282...
 	else
 		res_free(res);
 	return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
-
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-
-	return (ret == 0) ? res : NULL;
-}
-
+/*
+ * GB context.
+ */
 
 static int vmw_gb_context_create(struct vmw_resource *res)
Line 279... Line 322...
 
 	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;

Line 307... Line 350...
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for context "
 			  "binding.\n");
 		return -ENOMEM;
 	}
-
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
 	cmd->body.mobid = bo->mem.start;
 	cmd->body.validContents = res->backup_dirty;

Line 344... Line 386...
 
 
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_scrub(&uctx->cbs);
+	vmw_binding_state_scrub(uctx->cbs);
Line 412... Line 454...
 	cmd->body.cid = res->id;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	if (dev_priv->query_cid == res->id)
 		dev_priv->query_cid_valid = false;
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+
+	return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the otable contents on scrubbing.
+ *
+ * COtables must be unbound before their context, but unbinding requires
+ * the backup buffer being reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that doesn't have to be done later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+				   bool readback)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	int i;
+
+	vmw_binding_state_scrub(uctx->cbs);
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		struct vmw_resource *res;
+
+		/* Avoid racing with ongoing cotable destruction. */
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+		if (res)
+			res = vmw_resource_reference_unless_doomed(res);
+		spin_unlock(&uctx->cotable_lock);
+		if (!res)
+			continue;
+
+		WARN_ON(vmw_cotable_scrub(res, readback));
+		vmw_resource_unreference(&res);
+	}
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_context_scrub_cotables(res, readback);
+
+	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+	    readback) {
+		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+		if (vmw_query_readback_all(uctx->dx_query_mob))
+			DRM_ERROR("Failed to read back query states\n");
+	}
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
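Note: vmw_dx_context_unbind() above packs up to two SVGA commands into a single FIFO reservation: an optional DX_READBACK_CONTEXT (only when readback is requested) followed by a DX_BIND_CONTEXT whose mobid is SVGA3D_INVALID_ID, which detaches the backing MOB. A sketch restating the packing used above, with the same names as in the diff:

	/* One reservation, two commands when readback is requested:
	 * [SVGA3dCmdDXReadbackContext][SVGA3dCmdDXBindContext(mobid=INVALID)]
	 * otherwise just the bind-to-invalid command. */
	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;		/* readback command first */
		cmd2 = (void *) (&cmd1[1]);	/* bind command packed right after */
	}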
Line 433... Line 699...
 {
 	struct vmw_user_context *ctx =
 	    container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 
-//   ttm_base_object_kfree(ctx, base);
+	if (ctx->cbs)
+		vmw_binding_state_free(ctx->cbs);
+
+	(void) vmw_context_bind_dx_query(res, NULL);
+
+	ttm_base_object_kfree(ctx, base);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_context_size);
 }
Line 464... Line 735...
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 
 	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv, bool dx)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_context *ctx;
 	struct vmw_resource *res;
 	struct vmw_resource *tmp;
 	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 
+	if (!dev_priv->has_dx && dx) {
+		DRM_ERROR("DX contexts not supported by device.\n");
+		return -EINVAL;
+	}
 
Line 515... Line 790...
 
 	/*
 	 * From here on, the destructor takes over resource freeing.
 	 */
 
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
 	if (unlikely(ret != 0))
 		goto out_unlock;

Line 534... Line 809...
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
-
 }
 #endif
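Note: the context-define entry point becomes a static worker parameterized by dx. The ioctl wrappers themselves fall outside the hunks shown on this page; a hypothetical sketch of such a wrapper (the name vmw_context_define_ioctl is taken from the old revision's entry point; the body is assumed, not shown in this diff):

	/* Hypothetical wrapper -- not part of the hunks on this page. */
	int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
	{
		return vmw_context_define(dev, data, file_priv, false);
	}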
Line 542... Line 816...
 
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetShader body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for shader "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetRenderTarget body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for render target "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	cmd->body.target.face = 0;
-	cmd->body.target.mipmap = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
-				     bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		struct {
-			SVGA3dCmdSetTextureState c;
-			SVGA3dTextureState s1;
-		} body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for texture "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.c.cid = bi->ctx->id;
-	cmd->body.s1.stage = bi->i1.texture_stage;
-	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
-{
-	list_del(&cb->ctx_list);
-	if (!list_empty(&cb->res_list))
-		list_del(&cb->res_list);
-	cb->bi.ctx = NULL;
-}
-
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-			    const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
-			DRM_ERROR("Illegal render target type %u.\n",
-				  (unsigned) bi->i1.rt_type);
-			return -EINVAL;
-		}
-		loc = &cbs->render_targets[bi->i1.rt_type];
-		break;
-	case vmw_ctx_binding_tex:
-		if (unlikely((unsigned)bi->i1.texture_stage >=
-			     SVGA3D_NUM_TEXTURE_UNITS)) {
-			DRM_ERROR("Illegal texture/sampler unit %u.\n",
-				  (unsigned) bi->i1.texture_stage);
-			return -EINVAL;
-		}
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		if (unlikely((unsigned)bi->i1.shader_type >=
-			     SVGA3D_SHADERTYPE_MAX)) {
-			DRM_ERROR("Illegal shader type %u.\n",
-				  (unsigned) bi->i1.shader_type);
-			return -EINVAL;
-		}
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
-	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	loc->bi = *bi;
-	loc->bi.scrubbed = false;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	INIT_LIST_HEAD(&loc->res_list);
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
-					 const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		loc = &cbs->render_targets[bi->i1.rt_type];
-		break;
-	case vmw_ctx_binding_tex:
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
-	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	if (bi->res != NULL) {
-		loc->bi = *bi;
-		list_add_tail(&loc->ctx_list, &cbs->list);
-		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	}
-}
-
-/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
-	if (!cb->bi.scrubbed) {
-		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
-		cb->bi.scrubbed = true;
-	}
-	vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
- */
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, res_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
- */
-void vmw_context_binding_res_list_scrub(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, head, res_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_state_transfer - Commit staged binding info
- *
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
- *
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
- */
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
-					struct vmw_ctx_binding_state *from)
-{
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
-		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
-
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
-	struct vmw_ctx_binding *entry;
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
-	int ret;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (likely(!entry->bi.scrubbed))
-			continue;
-
-		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
-			    SVGA3D_INVALID_ID))
-			continue;
-
-		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
-		if (unlikely(ret != 0))
-			return ret;
-
-		entry->bi.scrubbed = false;
-	}
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_list - Return a list of context bindings
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+
+	return vmw_binding_state_list(uctx->cbs);
+}
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->man;
+}
+
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+					 SVGACOTableType cotable_type)
+{
+	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+		return ERR_PTR(-EINVAL);
+
+	return vmw_resource_reference
+		(container_of(ctx, struct vmw_user_context, res)->
+		 cotables[cotable_type]);
+}
+
+/**
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->cbs;
+}
+
+/**
+ * vmw_context_bind_dx_query -
+ * Sets query MOB for the context.  If @mob is NULL, then this function will
+ * remove the association between the MOB and the context.  This function
+ * assumes the binding_mutex is held.
+ *
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
+ *
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter.  0 otherwise.
+ */
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+			      struct vmw_dma_buffer *mob)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
+
+	if (mob == NULL) {
+		if (uctx->dx_query_mob) {
+			uctx->dx_query_mob->dx_query_ctx = NULL;
+			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			uctx->dx_query_mob = NULL;
+		}
+
+		return 0;
+	}
+
+	/* Can only have one MOB per context for queries */
+	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+		return -EINVAL;
+
+	mob->dx_query_ctx  = ctx_res;
+
+	if (!uctx->dx_query_mob)
+		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+
+	return 0;
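Note: the kerneldoc on vmw_context_bind_dx_query() above fixes the query-MOB rules: at most one MOB per context, NULL detaches it, and the caller must hold binding_mutex. A hedged caller-side sketch illustrating that contract (hypothetical usage, not taken from this page):

	/* Hypothetical caller, per the contract documented above. */
	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_context_bind_dx_query(ctx_res, mob);	/* associate; 0 on success */
	/* binding a different MOB while one is set yields -EINVAL */
	ret = vmw_context_bind_dx_query(ctx_res, NULL);	/* drop the association */
	mutex_unlock(&dev_priv->binding_mutex);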