Subversion Repositories Kolibri OS

Diff: Rev 4569 → Rev 5078
@@ -112 +112 @@
 		/*
 		 * Transfer staged context bindings to the
 		 * persistent context binding tracker.
 		 */
 		if (unlikely(val->staged_bindings)) {
+			if (!backoff) {
 			vmw_context_binding_state_transfer
 				(val->res, val->staged_bindings);
+			}
 			kfree(val->staged_bindings);
 			val->staged_bindings = NULL;
 		}
 		vmw_resource_unreserve(res, new_backup,
 			val->new_backup_offset);
@@ -176 +178 @@
 
 	return 0;
 }
+
+/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx)
+{
+	struct list_head *binding_list;
+	struct vmw_ctx_binding *entry;
+	int ret = 0;
+	struct vmw_resource *res;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	binding_list = vmw_context_binding_list(ctx);
+
+	list_for_each_entry(entry, binding_list, ctx_list) {
+		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		if (unlikely(res == NULL))
+			continue;
+
+		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		vmw_resource_unreference(&res);
+		if (unlikely(ret != 0))
+			break;
+	}
+
+	mutex_unlock(&dev_priv->binding_mutex);
+	return ret;
+}
 
 /**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
  * @list: Pointer to head of relocation list.
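As an aside, here is a minimal standalone sketch of the pattern the added vmw_resource_context_res_add() follows: walk a binding list under a mutex and take a temporary reference on each entry, skipping entries that are already doomed (their refcount has hit zero and they are on the way out). All types and helper names below are simplified stand-ins for illustration, not the driver's real ones:

#include <pthread.h>
#include <stdio.h>

struct resource {
	int refcount;          /* 0 means doomed: must not be revived */
	int id;
	struct resource *next; /* binding-list link */
};

static pthread_mutex_t binding_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mimics vmw_resource_reference_unless_doomed(): only take a
 * reference if the object is still alive. */
static struct resource *ref_unless_doomed(struct resource *res)
{
	if (res->refcount == 0)
		return NULL;
	res->refcount++;
	return res;
}

static void unref(struct resource *res)
{
	res->refcount--;
}

/* Add every still-alive resource on the binding list to a
 * validation list (here just printed). */
static int context_res_add(struct resource *binding_list)
{
	struct resource *entry, *res;
	int ret = 0;

	pthread_mutex_lock(&binding_mutex);
	for (entry = binding_list; entry; entry = entry->next) {
		res = ref_unless_doomed(entry);
		if (res == NULL)
			continue;	/* being destroyed; skip it */

		printf("validate resource %d\n", res->id);
		unref(res);		/* the validation list would hold its own ref */
	}
	pthread_mutex_unlock(&binding_mutex);
	return ret;
}

int main(void)
{
	struct resource b = { .refcount = 0, .id = 2, .next = NULL };
	struct resource a = { .refcount = 1, .id = 1, .next = &b };

	return context_res_add(&a);	/* prints only resource 1 */
}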
@@ -231 +271 @@
 static void vmw_resource_relocations_apply(uint32_t *cb,
 					   struct list_head *list)
 {
 	struct vmw_resource_relocation *rel;
 
-	list_for_each_entry(rel, list, head)
-		cb[rel->offset] = rel->res->id;
+	list_for_each_entry(rel, list, head) {
+		if (likely(rel->res != NULL))
+		cb[rel->offset] = rel->res->id;
+		else
+			cb[rel->offset] = SVGA_3D_CMD_NOP;
+	}
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
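For illustration, a self-contained sketch of the behaviour the reworked vmw_resource_relocations_apply() gains above: a relocation whose resource has gone away gets its command-buffer slot overwritten with a NOP command id instead of being dereferenced. The constant's value and the struct layout are assumptions of this sketch, not taken from the SVGA headers:

#include <stdint.h>
#include <stdio.h>

#define CMD_NOP 0x1040u		/* placeholder for SVGA_3D_CMD_NOP */

struct relocation {
	uint32_t offset;	/* index into the command buffer */
	const uint32_t *id;	/* NULL if the resource was destroyed */
};

static void relocations_apply(uint32_t *cb,
			      const struct relocation *rel, int n)
{
	for (int i = 0; i < n; i++) {
		if (rel[i].id != NULL)
			cb[rel[i].offset] = *rel[i].id;	/* patch real id */
		else
			cb[rel[i].offset] = CMD_NOP;	/* neutralize cmd */
	}
}

int main(void)
{
	uint32_t cb[4] = { 0 };
	uint32_t live_id = 7;
	struct relocation rels[] = {
		{ .offset = 1, .id = &live_id },
		{ .offset = 3, .id = NULL },	/* doomed resource */
	};

	relocations_apply(cb, rels, 2);
	printf("%u %u\n", cb[1], cb[3]);	/* prints: 7 4160 */
	return 0;
}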
@@ -376 +420 @@
 		}
 	}
 	return 0;
 }
+
+
+/**
+ * vmw_cmd_res_reloc_add - Add a resource to a software context's
+ * relocation- and validation lists.
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @id_loc: Pointer to where the id that needs translation is located.
+ * @res: Valid pointer to a struct vmw_resource.
+ * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
+ * used for this resource is returned here.
+ */
+static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 enum vmw_res_type res_type,
+				 uint32_t *id_loc,
+				 struct vmw_resource *res,
+				 struct vmw_resource_val_node **p_val)
+{
+	int ret;
+	struct vmw_resource_val_node *node;
+
+	*p_val = NULL;
+	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+					  res,
+					  id_loc - sw_context->buf_start);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = vmw_resource_val_add(sw_context, res, &node);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (res_type == vmw_res_context && dev_priv->has_mob &&
+	    node->first_usage) {
+
+		/*
+		 * Put contexts first on the list to be able to exit
+		 * list traversal for contexts early.
+		 */
+		list_del(&node->head);
+		list_add(&node->head, &sw_context->resource_list);
+
+		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+		if (unlikely(ret != 0))
+			goto out_err;
+		node->staged_bindings =
+			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+		if (node->staged_bindings == NULL) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			goto out_err;
+		}
+		INIT_LIST_HEAD(&node->staged_bindings->list);
+	}
+
+	if (p_val)
+		*p_val = node;
+
+out_err:
+	return ret;
+}
+
 
 /**
  * vmw_cmd_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
  * @converter: User-space visisble type specific information.
- * @id: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     enum vmw_res_type res_type,
 			     const struct vmw_user_resource_conv *converter,
-			     uint32_t *id,
+			 uint32_t *id_loc,
 			     struct vmw_resource_val_node **p_val)
 {
 	struct vmw_res_cache_entry *rcache =
 		&sw_context->res_cache[res_type];
 	struct vmw_resource *res;
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID) {
+	if (*id_loc == SVGA3D_INVALID_ID) {
 		if (p_val)
 			*p_val = NULL;
 		if (res_type == vmw_res_context) {
@@ -415 +527 @@
 	/*
 	 * Fastpath in case of repeated commands referencing the same
 	 * resource
 	 */
 
-	if (likely(rcache->valid && *id == rcache->handle)) {
+	if (likely(rcache->valid && *id_loc == rcache->handle)) {
 		const struct vmw_resource *res = rcache->res;
 
 		rcache->node->first_usage = false;
 		if (p_val)
 			*p_val = rcache->node;
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id - sw_context->buf_start);
+			 id_loc - sw_context->buf_start);
 	}
 
-	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->tfile,
-					      *id,
-					      converter,
-					      &res);
+   ret = vmw_user_resource_lookup_handle(dev_priv,
+                         sw_context->fp->tfile,
+					      *id_loc,
+                         converter,
+                         &res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) *id);
-//       dump_stack();
+			  (unsigned) *id_loc);
+..		dump_stack();
 		return ret;
 	}
 
-	rcache->valid = true;
-	rcache->res = res;
-	rcache->handle = *id;
-
-	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
-					  res,
-					  id - sw_context->buf_start);
-	if (unlikely(ret != 0))
-		goto out_no_reloc;
-
-	ret = vmw_resource_val_add(sw_context, res, &node);
-	if (unlikely(ret != 0))
-		goto out_no_reloc;
-
-	rcache->node = node;
-	if (p_val)
-		*p_val = node;
-
-	if (node->first_usage && res_type == vmw_res_context) {
-		node->staged_bindings =
-			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-		if (node->staged_bindings == NULL) {
-			DRM_ERROR("Failed to allocate context binding "
-				  "information.\n");
-			goto out_no_reloc;
-		}
+	rcache->valid = true;
+	rcache->res = res;
+	rcache->handle = *id_loc;
+
+	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+				    res, &node);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
@@ -479 +574 @@
 
 	return ret;
 }
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		if (unlikely(!val->staged_bindings))
+			break;
+
+		ret = vmw_context_rebind_all(val->res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to rebind context.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
 
 /**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
  * @dev_priv: Pointer to a device private structure.
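A note on why the added vmw_rebind_contexts() can stop at the first node without staged bindings: the new vmw_cmd_res_reloc_add() (earlier hunk) moves context nodes to the front of the resource list, so the traversal is over once a non-context node is seen. A simplified stand-alone sketch of that move-to-front plus early-exit idea, with stand-in types rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

struct node {
	bool is_context;	/* ~ val->staged_bindings != NULL */
	int id;
};

static int rebind_contexts(const struct node *list, int n)
{
	for (int i = 0; i < n; i++) {
		if (!list[i].is_context)
			break;		/* contexts are all up front */
		printf("rebind context %d\n", list[i].id);
	}
	return 0;
}

int main(void)
{
	/* Contexts first, then ordinary resources. */
	const struct node list[] = {
		{ true, 1 }, { true, 2 }, { false, 3 }, { false, 4 },
	};

	return rebind_contexts(list, 4);	/* rebinds 1 and 2 only */
}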
@@ -494 +617 @@
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
 {
 	struct vmw_cid_cmd {
 		SVGA3dCmdHeader header;
-		__le32 cid;
+		uint32_t cid;
 	} *cmd;
 
 	cmd = container_of(header, struct vmw_cid_cmd, header);
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
@@ -765 +888 @@
 	struct ttm_buffer_object *bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		return -EINVAL;
 	}
@@ -826 +949 @@
 	struct ttm_buffer_object *bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		return -EINVAL;
 	}
@@ -1106 +1229 @@
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
 	int ret;
+	SVGA3dCmdSurfaceDMASuffix *suffix;
+	uint32_t bo_size;
 
 	cmd = container_of(header, struct vmw_dma_cmd, header);
+	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+					       header->size - sizeof(*suffix));
+
+	/* Make sure device and verifier stays in sync. */
+	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+		DRM_ERROR("Invalid DMA suffix size.\n");
+		return -EINVAL;
+	}
+
 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 				      &cmd->dma.guest.ptr,
 				      &vmw_bo);
 	if (unlikely(ret != 0))
 		return ret;
 
+	/* Make sure DMA doesn't cross BO boundaries. */
+	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+		DRM_ERROR("Invalid DMA offset.\n");
+		return -EINVAL;
+	}
+
+	bo_size -= cmd->dma.guest.ptr.offset;
+	if (unlikely(suffix->maximumOffset > bo_size))
+		suffix->maximumOffset = bo_size;
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter, &cmd->dma.host.sid,
 				NULL);
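For illustration, a standalone sketch of the bounds checking the new SurfaceDMA code performs above: reject a start offset beyond the buffer, then clamp the command's maximum offset so the device can never be asked to DMA past the end of the backing object. Field and constant names are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct dma_cmd {
	uint32_t offset;	/* start offset into the buffer */
	uint32_t max_offset;	/* last byte the DMA may touch */
};

static int check_dma(struct dma_cmd *cmd, uint32_t num_pages)
{
	uint32_t bo_size = num_pages * PAGE_SIZE;

	if (cmd->offset > bo_size)
		return -1;	/* invalid DMA offset: hard failure */

	bo_size -= cmd->offset;
	if (cmd->max_offset > bo_size)
		cmd->max_offset = bo_size;	/* clamp, don't fail */
	return 0;
}

int main(void)
{
	struct dma_cmd cmd = { .offset = PAGE_SIZE, .max_offset = ~0u };

	if (check_dma(&cmd, 4) == 0)
		printf("clamped max_offset = %u\n", cmd.max_offset);
	return 0;
}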
@@ -1476 +1621 @@
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.sid, NULL);
 }
 
+#if 0
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
  *
@@ -1492 +1638 @@
 {
 	struct vmw_set_shader_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
-	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
+	struct vmw_ctx_bindinfo bi;
+	struct vmw_resource *res = NULL;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -1504 +1652 @@
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (dev_priv->has_mob) {
-		struct vmw_ctx_bindinfo bi;
-		struct vmw_resource_val_node *res_node;
-
-		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-					user_shader_converter,
-					&cmd->body.shid, &res_node);
-	if (unlikely(ret != 0))
-		return ret;
-
-		bi.ctx = ctx_node->res;
-		bi.res = res_node ? res_node->res : NULL;
-		bi.bt = vmw_ctx_binding_shader;
-		bi.i1.shader_type = cmd->body.type;
-		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
-	}
-
-	return 0;
-}
-
+	if (!dev_priv->has_mob)
+		return 0;
+
+	if (cmd->body.shid != SVGA3D_INVALID_ID) {
+		res = vmw_compat_shader_lookup
+			(vmw_context_res_man(ctx_node->res),
+			 cmd->body.shid,
+			 cmd->body.type);
+
+		if (!IS_ERR(res)) {
+			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
+						    vmw_res_shader,
+						    &cmd->body.shid, res,
+						    &res_node);
+			vmw_resource_unreference(&res);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+	if (!res_node) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context,
+					       vmw_res_shader,
+					user_shader_converter,
+					&cmd->body.shid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+	}
+
+		bi.ctx = ctx_node->res;
+		bi.res = res_node ? res_node->res : NULL;
+		bi.bt = vmw_ctx_binding_shader;
+		bi.i1.shader_type = cmd->body.type;
+		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+}
+#endif
+
+/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_const_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShaderConst body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_const_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob)
+		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+	return 0;
+}
+
+#if 0
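For context, a sketch of the lookup-with-fallback flow the reworked vmw_cmd_set_shader() uses above: first try the context's compat shader manager, and only on a miss fall back to the generic user-handle lookup. Everything here is simplified stand-in code, not the driver's API:

#include <stdio.h>

struct shader { int id; };

static struct shader compat  = { 1 };
static struct shader generic = { 2 };

/* Returns NULL on a miss, standing in for an IS_ERR() result. */
static struct shader *compat_lookup(int handle)
{
	return handle == 100 ? &compat : NULL;
}

static struct shader *user_lookup(int handle)
{
	(void)handle;
	return &generic;	/* pretend the handle always resolves */
}

static struct shader *resolve(int handle)
{
	struct shader *res = NULL;

	if (handle != -1)		/* -1 ~ SVGA3D_INVALID_ID */
		res = compat_lookup(handle);

	if (!res)			/* miss: take the generic path */
		res = user_lookup(handle);
	return res;
}

int main(void)
{
	printf("%d %d\n", resolve(100)->id, resolve(7)->id);	/* 1 2 */
	return 0;
}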
@@ -1549 +1748 @@
 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
 				     user_shader_converter,
 				     &cmd->body.shid, &cmd->body.mobid,
 				     cmd->body.offsetInBytes);
 }
+#endif
 
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -1593 +1793 @@
 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
 
 	return 0;
 }
 
-static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 		    false, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
@@ -1632 +1832 @@
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
 		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
-		    true, true, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
-		    true, true, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
-		    true, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
-		    true, true, false),
+//   VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+//           true, false, false),
+//   VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+//           true, false, false),
+//   VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+//           true, false, false),
+//   VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+//           true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
@@ -1724 +1924 @@
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
 		    false, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
-		    true, false, true),
+//   VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
+//           true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
 		    false, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
@@ -1790 +1990 @@
 
 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
 		goto out_invalid;
 
 	entry = &vmw_cmd_entries[cmd_id];
+	if (unlikely(!entry->func))
+		goto out_invalid;
 
 	if (unlikely(!entry->user_allow && !sw_context->kernel))
 		goto out_privileged;
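A sketch of the dispatch-table hardening added above: with some handlers commented out of vmw_cmd_entries[], a table slot can now hold a NULL function pointer, so the checker must treat NULL as "invalid command" rather than calling through it. Simplified stand-in types:

#include <stdio.h>

typedef int (*cmd_func)(void *buf);

struct cmd_entry {
	cmd_func func;
	int user_allow;
};

static int cmd_draw(void *buf) { (void)buf; return 0; }

#define CMD_MAX 4

static const struct cmd_entry cmd_entries[CMD_MAX] = {
	[1] = { cmd_draw, 1 },
	/* slots 0, 2, 3 intentionally left without a handler */
};

static int check_one(unsigned cmd_id, void *buf, int kernel)
{
	const struct cmd_entry *entry;

	if (cmd_id >= CMD_MAX)
		return -1;		/* out of range */

	entry = &cmd_entries[cmd_id];
	if (!entry->func)
		return -1;		/* hole in the table: invalid */
	if (!entry->user_allow && !kernel)
		return -2;		/* privileged command */

	return entry->func(buf);
}

int main(void)
{
	printf("%d %d\n", check_one(1, NULL, 0), check_one(2, NULL, 0));
	return 0;	/* prints: 0 -1 */
}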
@@ -2127 +2330 @@
 					  false, false,
 					  VMW_FENCE_WAIT_TIMEOUT);
 	}
 }
+
+
 
 int vmw_execbuf_process(struct drm_file *file_priv,
 			struct vmw_private *dev_priv,
 			void __user *user_commands,
 			void *kernel_commands,
@@ -2170 +2375 @@
 		}
 		kernel_commands = sw_context->cmd_bounce;
     } else  */
 		sw_context->kernel = true;
 
-	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
 	INIT_LIST_HEAD(&sw_context->resource_list);
@@ -2187 +2392 @@
 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 		if (unlikely(ret != 0))
 			goto out_unlock;
 		sw_context->res_ht_initialized = true;
 	}
+	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
 
 	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_resources_reserve(sw_context);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
@@ -2224 +2430 @@
 	if (unlikely(ret != 0)) {
 		ret = -ERESTARTSYS;
 		goto out_err;
 	}
+
+	if (dev_priv->has_mob) {
+		ret = vmw_rebind_contexts(sw_context);
+		if (unlikely(ret != 0))
+			goto out_unlock_binding;
+	}
 
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		ret = -ENOMEM;
@@ -2275 +2487 @@
 	} else if (likely(fence != NULL)) {
 		vmw_fence_obj_unreference(&fence);
 	}
 
 	list_splice_init(&sw_context->resource_list, &resource_list);
+	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
@@ -2288 +2501 @@
 	return 0;
 
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-	vmw_free_relocations(sw_context);
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
 	if (unlikely(dev_priv->pinned_bo != NULL &&
 		     !dev_priv->query_cid_valid))
 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 out_unlock:
 	list_splice_init(&sw_context->resource_list, &resource_list);
 	error_resource = sw_context->error_resource;
 	sw_context->error_resource = NULL;
+	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
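A sketch of the reordered error unwind above: failures that happen before the buffers are reserved now jump to the later label (out_err_nores) so the backoff step is skipped, while later failures unwind everything. Stand-in functions, same goto idiom as the driver:

#include <stdio.h>

static int reserve(void)  { puts("reserve");  return 0; }
static void backoff(void) { puts("backoff"); }
static void unreserve_and_free(void) { puts("unreserve+free"); }

static int process(int fail_early, int fail_late)
{
	int ret;

	ret = fail_early ? -1 : 0;	/* e.g. command validation */
	if (ret)
		goto out_err_nores;	/* nothing reserved yet */

	ret = reserve();
	if (ret)
		goto out_err_nores;

	ret = fail_late ? -1 : 0;	/* e.g. fifo submission */
	if (ret)
		goto out_err;

	return 0;

out_err:
	backoff();		/* only needed once reservation happened */
out_err_nores:
	unreserve_and_free();
	return ret;
}

int main(void)
{
	process(1, 0);	/* skips backoff */
	process(0, 1);	/* full unwind */
	return 0;
}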
@@ -2456 +2671 @@
 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
-//   struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
 	/*
 	 * This will allow us to extend the ioctl argument while
@@ -2473 +2687 @@
 		DRM_ERROR("You're running outdated experimental "
 			  "vmwgfx user-space drivers.");
 		return -EINVAL;
 	}
 
-//   ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2489 +2703 @@
 		goto out_unlock;
 
 //   vmw_kms_cursor_post_execbuf(dev_priv);
 
 out_unlock:
-//   ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);