/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
	struct vmw_cmdbuf_res_manager *man;
};

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
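
/*
 * A scrub function either removes a binding of its type from the device
 * (@rebind == false), pointing the binding slot at SVGA3D_INVALID_ID, or
 * re-emits the bind command (@rebind == true) once the bound resource is
 * available again.
 */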

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};
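
/*
 * Legacy contexts are defined and destroyed together with the resource
 * itself and are never evicted, so all vmw_res_func callbacks above are
 * NULL. Guest-backed contexts are backed by a mob and can be evicted,
 * hence the create/destroy/bind/unbind callbacks of vmw_gb_context_func.
 */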

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture
};
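
/*
 * Scrub commands are dispatched on the binding type, e.g.
 *
 *	vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
 *
 * emits the command that scrubs the binding described by @cb.
 */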

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill(&uctx->cbs);
		(void) vmw_gb_context_destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}
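
/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor called when the last reference is dropped, or
 *            NULL to use kfree().
 *
 * On error the resource is freed through @res_free (or kfree()), so the
 * caller must not touch @res after a failing call.
 */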
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (unlikely(IS_ERR(uctx->man))) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}
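
/**
 * vmw_context_alloc - Allocate and initialize a context resource without
 * a user-space handle.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a referenced context resource on success, NULL on failure.
 */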
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
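
/*
 * On unbind, all context bindings are first scrubbed. When @readback is
 * set, a SVGA_3D_CMD_READBACK_GB_CONTEXT command is emitted in the same
 * FIFO reservation, directly in front of the SVGA_3D_CMD_BIND_GB_CONTEXT
 * command that detaches the mob by binding SVGA3D_INVALID_ID.
 */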
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

//   ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
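
/*
 * The context define/destroy ioctls below are compiled out (#if 0) in
 * this KolibriOS port; contexts are presumably created from kernel code
 * via vmw_context_alloc() instead.
 */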
#if 0
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
#endif

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	}
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrubs all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
			    SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
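
/**
 * vmw_context_res_man - Return the command buffer managed resource manager
 * of a context.
 *
 * @ctx: The context resource.
 */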
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}