Subversion Repositories: Kolibri OS

Diff between Rev 5078 and Rev 6296
Line 1... Line 1...
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
Line 27... Line 27...
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_reg.h"
 #include 
+#include 
+#include "vmwgfx_so.h"
Line 31... Line 33...
-#include 
+#include "vmwgfx_binding.h"
Line 32... Line 34...
 
 #define VMW_RES_HT_ORDER 12
Line 57... Line 59...
  * @staged_bindings: If @res is a context, tracks bindings set up during
  * the command batch. Otherwise NULL.
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: This means @switching_backup is true on first buffer
+ * reference. So resource reservation does not need to allocate a backup
+ * buffer for the resource.
  */
 struct vmw_resource_val_node {
 	struct list_head head;
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
 	struct vmw_dma_buffer *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
-	bool first_usage;
-	bool no_buffer_needed;
+	u32 first_usage : 1;
+	u32 switching_backup : 1;
+	u32 no_buffer_needed : 1;
};
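The hunk above packs the node's flags into single-bit fields of one u32 instead of separate bools, so all three flags share one word. A minimal standalone sketch of the technique (illustrative C, not driver code; the typedef stands in for the kernel's u32):

#include <stdio.h>

typedef unsigned int u32;

struct flags_packed {
	u32 first_usage : 1;	/* one bit per flag, packed into one u32 */
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

struct flags_bool {
	_Bool first_usage;	/* typically one byte per flag */
	_Bool switching_backup;
	_Bool no_buffer_needed;
};

int main(void)
{
	/* The packed variant stays at sizeof(u32); the bool variant
	 * usually needs three bytes plus padding. */
	printf("packed: %zu bytes, bools: %zu bytes\n",
	       sizeof(struct flags_packed), sizeof(struct flags_bool));
	return 0;
}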
Line 75... Line 81...
 
 /**
  * struct vmw_cmd_entry - Describe a command for the verifier
Line 90... Line 96...
 
 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
Line -... Line 99...
 				       (_gb_disable), (_gb_enable)}
+
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+				   struct vmw_dma_buffer *vbo,
+				   bool validate_as_mob,
+				   uint32_t *p_val_node);
+
 
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
  * command submission.
  *
- * @list_head: list of resources to unreserve.
+ * @sw_context: pointer to the software context
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct list_head *list,
-					bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+				    bool backoff)
+{
+	struct vmw_resource_val_node *val;
+	struct list_head *list = &sw_context->resource_list;
+
+	if (sw_context->dx_query_mob && !backoff)
Line 104... Line 128...
-{
-	struct vmw_resource_val_node *val;
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
 
 	list_for_each_entry(val, list, head) {
Line 108... Line 132...
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *new_backup =
-			backoff ? NULL : val->new_backup;
+		bool switch_backup =
+			(backoff) ? false : val->switching_backup;
 
 		/*
 		 * Transfer staged context bindings to the
 		 * persistent context binding tracker.
 		 */
 		if (unlikely(val->staged_bindings)) {
 			if (!backoff) {
-			vmw_context_binding_state_transfer
-				(val->res, val->staged_bindings);
-			}
-			kfree(val->staged_bindings);
+				vmw_binding_state_commit
+					(vmw_context_binding_state(val->res),
+					 val->staged_bindings);
+			}
+
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
 			val->staged_bindings = NULL;
 		}
-		vmw_resource_unreserve(res, new_backup,
+		vmw_resource_unreserve(res, switch_backup, val->new_backup,
Line -... Line 154...
+				       val->new_backup_offset);
+		vmw_dmabuf_unreference(&val->new_backup);
+	}
+}
+
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private:
+ * @sw_context: The validation context:
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   struct vmw_resource_val_node *node)
+{
+	int ret;
+
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (!sw_context->staged_bindings) {
+		sw_context->staged_bindings =
+			vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(sw_context->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(sw_context->staged_bindings);
+			sw_context->staged_bindings = NULL;
+			goto out_err;
+		}
+	}
+
+	if (sw_context->staged_bindings_inuse) {
+		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(node->staged_bindings);
+			node->staged_bindings = NULL;
+			goto out_err;
+		}
+	} else {
+		node->staged_bindings = sw_context->staged_bindings;
+		sw_context->staged_bindings_inuse = true;
+	}
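The added vmw_cmd_ctx_first_setup above keeps one binding-state allocation cached in the software context and only falls back to a fresh allocation while the cached one is handed out. A rough standalone sketch of that allocate-once/reuse pattern (illustrative names, not the driver's):

#include <stdlib.h>
#include <stdio.h>

struct state_cache {
	void *slot;	/* allocated once, reused across batches */
	int inuse;
};

static void *state_get(struct state_cache *c, size_t size)
{
	if (!c->inuse && (c->slot || (c->slot = malloc(size)))) {
		c->inuse = 1;
		return c->slot;		/* reuse the cached allocation */
	}
	return malloc(size);		/* cached slot busy: plain allocation */
}

static void state_put(struct state_cache *c, void *p)
{
	if (p == c->slot)
		c->inuse = 0;		/* keep it cached for the next batch */
	else
		free(p);
}

int main(void)
{
	struct state_cache c = { 0 };
	void *a = state_get(&c, 64);	/* gets the cached slot */
	void *b = state_get(&c, 64);	/* slot busy: fresh allocation */

	printf("%s\n", a == c.slot && b != c.slot ? "reuse ok" : "?");
	state_put(&c, b);
	state_put(&c, a);
	return 0;
}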
Line 125... Line 202...
-			val->new_backup_offset);
-		vmw_dmabuf_unreference(&val->new_backup);
-	}
-}
+
+	return 0;
+out_err:
+	return ret;
Line 139... Line 216...
  */
 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 				struct vmw_resource *res,
 				struct vmw_resource_val_node **p_node)
 {
+	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_resource_val_node *node;
 	struct drm_hash_item *hash;
 	int ret;
Line 147... Line 225...
 
Line 167... Line 245...
 		DRM_ERROR("Failed to initialize a resource validation "
 			  "entry.\n");
 		kfree(node);
 		return ret;
 	}
-	list_add_tail(&node->head, &sw_context->resource_list);
 	node->res = vmw_resource_reference(res);
 	node->first_usage = true;
-
 	if (unlikely(p_node != NULL))
 		*p_node = node;
Line -... Line 254...
-
-	return 0;
+
+	if (!dev_priv->has_mob) {
+		list_add_tail(&node->head, &sw_context->resource_list);
+		return 0;
Line -... Line 258...
+	}
+
+	switch (vmw_res_type(res)) {
+	case vmw_res_context:
+	case vmw_res_dx_context:
+		list_add(&node->head, &sw_context->ctx_resource_list);
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+		break;
+	case vmw_res_cotable:
+		list_add_tail(&node->head, &sw_context->ctx_resource_list);
+		break;
+	default:
+		list_add_tail(&node->head, &sw_context->resource_list);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to
+ * to the validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *view)
+{
+	int ret;
+
+	/*
+	 * First add the resource the view is pointing to, otherwise
+	 * it may be swapped out when the view is validated.
+	 */
+	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+	if (ret)
+		return ret;
+
+	return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's
+ * pointing to to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+			       enum vmw_view_type view_type, u32 id)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view = vmw_view_lookup(sw_context->man, view_type, id);
+	if (IS_ERR(view))
+		return PTR_ERR(view);
+
+	ret = vmw_view_res_val_add(sw_context, view);
+	vmw_resource_unreference(&view);
+
+	return ret;
 }
 
 /**
  * vmw_resource_context_res_add - Put resources previously bound to a context on
  * the validation list
Line 193... Line 347...
 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 					struct vmw_sw_context *sw_context,
 					struct vmw_resource *ctx)
 {
 	struct list_head *binding_list;
-	struct vmw_ctx_binding *entry;
+	struct vmw_ctx_bindinfo *entry;
 	int ret = 0;
 	struct vmw_resource *res;
+	u32 i;
+
+	/* Add all cotables to the validation list. */
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			res = vmw_context_cotable(ctx, i);
+			if (IS_ERR(res))
+				continue;
Line -... Line 363...
+
+			ret = vmw_resource_val_add(sw_context, res, NULL);
+			vmw_resource_unreference(&res);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
 
+	/* Add all resources bound to the context to the validation list */
 	mutex_lock(&dev_priv->binding_mutex);
Line 203... Line 374...
 	binding_list = vmw_context_binding_list(ctx);
 
 	list_for_each_entry(entry, binding_list, ctx_list) {
-		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		/* entry->res is not refcounted */
+		res = vmw_resource_reference_unless_doomed(entry->res);
Line -... Line 379...
 		if (unlikely(res == NULL))
 			continue;
 
-		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		if (vmw_res_type(entry->res) == vmw_res_view)
+			ret = vmw_view_res_val_add(sw_context, entry->res);
+		else
+			ret = vmw_resource_val_add(sw_context, entry->res,
+						   NULL);
 		vmw_resource_unreference(&res);
Line -... Line 388...
 		if (unlikely(ret != 0))
 			break;
 	}
+
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		struct vmw_dma_buffer *dx_query_mob;
+
+		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+		if (dx_query_mob)
+			ret = vmw_bo_to_validate_list(sw_context,
+						      dx_query_mob,
+						      true, NULL);
 	}
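The validation-list ordering used by vmw_resource_val_add earlier in this diff (contexts go on with list_add at the head, other resources with list_add_tail at the back) lets later walks stop at the first non-context node. A small self-contained illustration of that ordering trick (plain C, not driver code; the driver uses the kernel's struct list_head):

#include <stdio.h>
#include <stdlib.h>

enum res_type { RES_CONTEXT, RES_OTHER };

struct node {
	enum res_type type;
	struct node *next;
};

static struct node *head, *tail;

static void add(enum res_type type)
{
	struct node *n = calloc(1, sizeof(*n));

	n->type = type;
	if (type == RES_CONTEXT) {	/* list_add(): push front */
		n->next = head;
		head = n;
		if (!tail)
			tail = n;
	} else {			/* list_add_tail(): push back */
		if (tail)
			tail->next = n;
		else
			head = n;
		tail = n;
	}
}

int main(void)
{
	add(RES_OTHER);
	add(RES_CONTEXT);
	add(RES_OTHER);
	for (struct node *n = head; n; n = n->next) {
		if (n->type != RES_CONTEXT)
			break;		/* early exit past all contexts */
		puts("context node");
	}
	return 0;
}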
Line 215... Line 401...
 
Line 306... Line 492...
  *
  * Returns -EINVAL if the limit of number of buffer objects per command
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct ttm_buffer_object *bo,
+				   struct vmw_dma_buffer *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
 	uint32_t val_node;
 	struct vmw_validate_buffer *vval_buf;
 	struct ttm_validate_buffer *val_buf;
 	struct drm_hash_item *hash;
 	int ret;
Line 320... Line 506...
 
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
 				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
 		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
Line 334... Line 520...
 			DRM_ERROR("Max number of DMA buffers per submission "
 				  "exceeded.\n");
 			return -EINVAL;
 		}
 		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) bo;
+		vval_buf->hash.key = (unsigned long) vbo;
 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed to initialize a buffer validation "
 				  "entry.\n");
 			return ret;
 		}
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(bo);
-		val_buf->reserved = false;
+		val_buf->bo = ttm_bo_reference(&vbo->base);
+		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
 	}
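vmw_bo_to_validate_list above dedupes buffers by first probing a pointer-keyed hash table and only appending a new validation node on a miss. A tiny standalone sketch of that add-once lookup (illustrative only; the driver uses drm_ht_find_item/drm_ht_insert_item and handles table growth and errors):

#include <stdio.h>

#define SLOTS 64

static const void *table[SLOTS];

static unsigned slot_for(const void *key)
{
	return ((unsigned long)key >> 4) % SLOTS;	/* crude pointer hash */
}

/* Returns 1 if key was newly inserted, 0 if it was already present. */
static int add_once(const void *key)
{
	unsigned i = slot_for(key);

	while (table[i] && table[i] != key)
		i = (i + 1) % SLOTS;			/* linear probing */
	if (table[i] == key)
		return 0;				/* already on the list */
	table[i] = key;
	return 1;
}

int main(void)
{
	int x, y;

	printf("%d %d %d\n", add_once(&x), add_once(&y), add_once(&x));
	return 0;	/* prints: 1 1 0 */
}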
Line 353... Line -...
-
-	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 
 	if (p_val_node)
Line 357... Line 541...
 		*p_val_node = val_node;
 
Line 370... Line 554...
  * since only a single thread at once will attempt this.
  */
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
 	struct vmw_resource_val_node *val;
-	int ret;
+	int ret = 0;
Line 376... Line 560...
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
Line 378... Line 562...
 		struct vmw_resource *res = val->res;
 
-		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
Line 381... Line 565...
 		if (unlikely(ret != 0))
 			return ret;
Line 383... Line 567...
 
 		if (res->backup) {
-			struct ttm_buffer_object *bo = &res->backup->base;
+			struct vmw_dma_buffer *vbo = res->backup;
Line 386... Line 570...
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo,
+				(sw_context, vbo,
 				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+	if (sw_context->dx_query_mob) {
+		struct vmw_dma_buffer *expected_dx_query_mob;
+
+		expected_dx_query_mob =
+			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+		if (expected_dx_query_mob &&
+		    expected_dx_query_mob != sw_context->dx_query_mob) {
+			ret = -EINVAL;
Line 392... Line 588...
-				return ret;
-		}
-	}
+		}
+	}
+
Line 409... Line 605...
 	struct vmw_resource_val_node *val;
 	int ret;
Line 411... Line 607...
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
Line 413... Line 610...
+		struct vmw_dma_buffer *backup = res->backup;
 
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to validate resource.\n");
 			return ret;
 		}
+
+		/* Check if the resource switched backup buffer */
+		if (backup && res->backup && (backup != res->backup)) {
+			struct vmw_dma_buffer *vbo = res->backup;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, vbo,
+				 vmw_resource_needs_backup(res), NULL);
+			if (ret) {
+				ttm_bo_unreserve(&vbo->base);
+				return ret;
+			}
+		}
 	}
Line 422... Line -...
 	return 0;
 }
-
 
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
  * used for this resource is returned here.
  */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
-				 enum vmw_res_type res_type,
 				 uint32_t *id_loc,
 				 struct vmw_resource *res,
Line 448... Line 655...
 	*p_val = NULL;
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
 					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
-		goto out_err;
+		return ret;
Line 454... Line 661...
 
 	ret = vmw_resource_val_add(sw_context, res, &node);
 	if (unlikely(ret != 0))
-		goto out_err;
-
-	if (res_type == vmw_res_context && dev_priv->has_mob &&
-	    node->first_usage) {
-
-		/*
-		 * Put contexts first on the list to be able to exit
-		 * list traversal for contexts early.
-		 */
-		list_del(&node->head);
-		list_add(&node->head, &sw_context->resource_list);
-
-		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-		if (unlikely(ret != 0))
-			goto out_err;
-		node->staged_bindings =
-			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-		if (node->staged_bindings == NULL) {
-			DRM_ERROR("Failed to allocate context binding "
-				  "information.\n");
-			goto out_err;
-		}
-		INIT_LIST_HEAD(&node->staged_bindings->list);
480
	}
664
		return ret;
481
 
665
 
Line 482... Line -...
 	if (p_val)
 		*p_val = node;
-
Line 485... Line 668...
-out_err:
-	return ret;
+
+	return 0;
Line 547... Line 730...
-                         converter,
-                         &res);
+					      converter,
+					      &res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
 			  (unsigned) *id_loc);
-..		dump_stack();
+//       dump_stack();
 		return ret;
 	}
Line 555... Line 738...
 
 	rcache->valid = true;
 	rcache->res = res;
Line 558... Line 741...
 	rcache->handle = *id_loc;
 
-	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
 				    res, &node);
Line 562... Line 745...
 	if (unlikely(ret != 0))
Line 574... Line 757...
 
 	return ret;
 }
+
+/**
+ * vmw_rebind_dx_query - Rebind DX query associated with the context
+ *
+ * @ctx_res: context the query belongs to
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+	struct vmw_private *dev_priv = ctx_res->dev_priv;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindAllQuery body;
+	} *cmd;
+
+
+	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+	if (cmd == NULL) {
+		DRM_ERROR("Failed to rebind queries.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = ctx_res->id;
+	cmd->body.mobid = dx_query_mob->base.mem.start;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+	return 0;
+}
 
 /**
  * vmw_rebind_contexts - Rebind all resources previously bound to
  * referenced contexts.
  *
 *
Line 590... Line 813...
590
 
813
 
591
	list_for_each_entry(val, &sw_context->resource_list, head) {
814
	list_for_each_entry(val, &sw_context->resource_list, head) {
592
		if (unlikely(!val->staged_bindings))
815
		if (unlikely(!val->staged_bindings))
Line -... Line 816...
-
 
816
			break;
593
			break;
817
 
594
 
818
		ret = vmw_binding_rebind_all
595
		ret = vmw_context_rebind_all(val->res);
819
			(vmw_context_binding_state(val->res));
596
		if (unlikely(ret != 0)) {
820
		if (unlikely(ret != 0)) {
597
			if (ret != -ERESTARTSYS)
821
			if (ret != -ERESTARTSYS)
598
				DRM_ERROR("Failed to rebind context.\n");
822
				DRM_ERROR("Failed to rebind context.\n");
-
 
823
			return ret;
-
 
824
		}
-
 
825
 
-
 
826
		ret = vmw_rebind_all_dx_query(val->res);
-
 
827
		if (ret != 0)
-
 
828
			return ret;
-
 
829
	}
-
 
830
 
-
 
831
	return 0;
-
 
832
}
-
 
833
 
-
 
834
/**
-
 
835
 * vmw_view_bindings_add - Add an array of view bindings to a context
-
 
836
 * binding state tracker.
-
 
837
 *
-
 
838
 * @sw_context: The execbuf state used for this command.
-
 
839
 * @view_type: View type for the bindings.
-
 
840
 * @binding_type: Binding type for the bindings.
-
 
841
 * @shader_slot: The shader slot to user for the bindings.
-
 
842
 * @view_ids: Array of view ids to be bound.
-
 
843
 * @num_views: Number of view ids in @view_ids.
-
 
844
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
-
 
845
 */
-
 
846
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
-
 
847
				 enum vmw_view_type view_type,
-
 
848
				 enum vmw_ctx_binding_type binding_type,
-
 
849
				 uint32 shader_slot,
-
 
850
				 uint32 view_ids[], u32 num_views,
-
 
851
				 u32 first_slot)
-
 
852
{
-
 
853
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
-
 
854
	struct vmw_cmdbuf_res_manager *man;
-
 
855
	u32 i;
-
 
856
	int ret;
-
 
857
 
-
 
858
	if (!ctx_node) {
-
 
859
		DRM_ERROR("DX Context not set.\n");
-
 
860
		return -EINVAL;
-
 
861
	}
-
 
862
 
-
 
863
	man = sw_context->man;
-
 
864
	for (i = 0; i < num_views; ++i) {
-
 
865
		struct vmw_ctx_bindinfo_view binding;
-
 
866
		struct vmw_resource *view = NULL;
-
 
867
 
-
 
868
		if (view_ids[i] != SVGA3D_INVALID_ID) {
-
 
869
			view = vmw_view_lookup(man, view_type, view_ids[i]);
-
 
870
			if (IS_ERR(view)) {
-
 
871
				DRM_ERROR("View not found.\n");
-
 
872
				return PTR_ERR(view);
-
 
873
			}
-
 
874
 
-
 
875
			ret = vmw_view_res_val_add(sw_context, view);
-
 
876
			if (ret) {
-
 
877
				DRM_ERROR("Could not add view to "
-
 
878
					  "validation list.\n");
-
 
879
				vmw_resource_unreference(&view);
-
 
880
				return ret;
-
 
881
			}
-
 
882
		}
-
 
883
		binding.bi.ctx = ctx_node->res;
-
 
884
		binding.bi.res = view;
-
 
885
		binding.bi.bt = binding_type;
-
 
886
		binding.shader_slot = shader_slot;
-
 
887
		binding.slot = first_slot + i;
-
 
888
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
-
 
889
				shader_slot, binding.slot);
599
			return ret;
890
		if (view)
Line 600... Line 891...
600
		}
891
			vmw_resource_unreference(&view);
601
	}
892
	}
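The added vmw_view_bindings_add above walks an array of view ids, validates each view, and records one binding per consecutive slot, treating SVGA3D_INVALID_ID as "unbind this slot". A compact standalone sketch of the slot-assignment loop (illustrative only, not driver code; INVALID_ID stands in for SVGA3D_INVALID_ID):

#include <stdio.h>

#define INVALID_ID 0xffffffffu

struct binding {
	unsigned int id;
	unsigned int slot;
};

static void bindings_add(struct binding *out, const unsigned int ids[],
			 unsigned int num, unsigned int first_slot)
{
	unsigned int i;

	for (i = 0; i < num; ++i) {
		out[i].id = ids[i];		/* may be INVALID_ID: unbind */
		out[i].slot = first_slot + i;	/* consecutive slots */
	}
}

int main(void)
{
	unsigned int ids[] = { 3, INVALID_ID, 7 };
	struct binding b[3];

	bindings_add(b, ids, 3, 4);
	for (int i = 0; i < 3; ++i)
		printf("slot %u -> %s\n", b[i].slot,
		       b[i].id == INVALID_ID ? "unbound" : "view");
	return 0;
}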
Line 639... Line 930...
 	struct vmw_resource_val_node *res_node;
 	int ret;
	int ret;
Line 641... Line 932...
641
 
932
 
Line -... Line 933...
-
 
933
	cmd = container_of(header, struct vmw_sid_cmd, header);
-
 
934
 
-
 
935
	if (cmd->body.type >= SVGA3D_RT_MAX) {
-
 
936
		DRM_ERROR("Illegal render target type %u.\n",
-
 
937
			  (unsigned) cmd->body.type);
-
 
938
		return -EINVAL;
642
	cmd = container_of(header, struct vmw_sid_cmd, header);
939
	}
643
 
940
 
644
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
941
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
645
				user_context_converter, &cmd->body.cid,
942
				user_context_converter, &cmd->body.cid,
646
				&ctx_node);
943
				&ctx_node);
Line 652... Line 949...
652
				&cmd->body.target.sid, &res_node);
949
				&cmd->body.target.sid, &res_node);
653
	if (unlikely(ret != 0))
950
	if (unlikely(ret != 0))
654
	return ret;
951
		return ret;
Line 655... Line 952...
655
 
952
 
656
	if (dev_priv->has_mob) {
953
	if (dev_priv->has_mob) {
Line 657... Line 954...
657
		struct vmw_ctx_bindinfo bi;
954
		struct vmw_ctx_bindinfo_view binding;
658
 
955
 
659
		bi.ctx = ctx_node->res;
956
		binding.bi.ctx = ctx_node->res;
660
		bi.res = res_node ? res_node->res : NULL;
957
		binding.bi.res = res_node ? res_node->res : NULL;
661
		bi.bt = vmw_ctx_binding_rt;
958
		binding.bi.bt = vmw_ctx_binding_rt;
-
 
959
		binding.slot = cmd->body.type;
662
		bi.i1.rt_type = cmd->body.type;
960
		vmw_binding_add(ctx_node->staged_bindings,
Line 663... Line 961...
663
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
961
				&binding.bi, 0, binding.slot);
664
	}
962
	}
Line 675... Line 973...
675
		SVGA3dCmdSurfaceCopy body;
973
		SVGA3dCmdSurfaceCopy body;
676
	} *cmd;
974
	} *cmd;
677
	int ret;
975
	int ret;
Line 678... Line 976...
678
 
976
 
-
 
977
	cmd = container_of(header, struct vmw_sid_cmd, header);
679
	cmd = container_of(header, struct vmw_sid_cmd, header);
978
 
680
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
979
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
681
				user_surface_converter,
980
				user_surface_converter,
682
				&cmd->body.src.sid, NULL);
981
				&cmd->body.src.sid, NULL);
683
	if (unlikely(ret != 0))
982
	if (ret)
-
 
983
		return ret;
684
		return ret;
984
 
685
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
985
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
686
				 user_surface_converter,
986
				 user_surface_converter,
687
				 &cmd->body.dest.sid, NULL);
987
				 &cmd->body.dest.sid, NULL);
Line -... Line 988...
-
 
988
}
-
 
989
 
-
 
990
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
-
 
991
				      struct vmw_sw_context *sw_context,
-
 
992
				      SVGA3dCmdHeader *header)
-
 
993
{
-
 
994
	struct {
-
 
995
		SVGA3dCmdHeader header;
-
 
996
		SVGA3dCmdDXBufferCopy body;
-
 
997
	} *cmd;
-
 
998
	int ret;
-
 
999
 
-
 
1000
	cmd = container_of(header, typeof(*cmd), header);
-
 
1001
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-
 
1002
				user_surface_converter,
-
 
1003
				&cmd->body.src, NULL);
-
 
1004
	if (ret != 0)
-
 
1005
		return ret;
-
 
1006
 
-
 
1007
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-
 
1008
				 user_surface_converter,
-
 
1009
				 &cmd->body.dest, NULL);
-
 
1010
}
-
 
1011
 
-
 
1012
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
-
 
1013
				   struct vmw_sw_context *sw_context,
-
 
1014
				   SVGA3dCmdHeader *header)
-
 
1015
{
-
 
1016
	struct {
-
 
1017
		SVGA3dCmdHeader header;
-
 
1018
		SVGA3dCmdDXPredCopyRegion body;
-
 
1019
	} *cmd;
-
 
1020
	int ret;
-
 
1021
 
-
 
1022
	cmd = container_of(header, typeof(*cmd), header);
-
 
1023
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-
 
1024
				user_surface_converter,
-
 
1025
				&cmd->body.srcSid, NULL);
-
 
1026
	if (ret != 0)
-
 
1027
		return ret;
-
 
1028
 
-
 
1029
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-
 
1030
				 user_surface_converter,
-
 
1031
				 &cmd->body.dstSid, NULL);
688
}
1032
}
689
 
1033
 
690
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
1034
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
691
				     struct vmw_sw_context *sw_context,
1035
				     struct vmw_sw_context *sw_context,
692
				     SVGA3dCmdHeader *header)
1036
				     SVGA3dCmdHeader *header)
Line 753... Line 1097...
753
 * results. If so, the function prepares the state of @sw_context for
1097
 * results. If so, the function prepares the state of @sw_context for
754
 * switching pinned buffers after successful submission of the current
1098
 * switching pinned buffers after successful submission of the current
755
 * command batch.
1099
 * command batch.
756
 */
1100
 */
757
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1101
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
758
				       struct ttm_buffer_object *new_query_bo,
1102
				       struct vmw_dma_buffer *new_query_bo,
759
				       struct vmw_sw_context *sw_context)
1103
				       struct vmw_sw_context *sw_context)
760
{
1104
{
761
	struct vmw_res_cache_entry *ctx_entry =
1105
	struct vmw_res_cache_entry *ctx_entry =
762
		&sw_context->res_cache[vmw_res_context];
1106
		&sw_context->res_cache[vmw_res_context];
763
	int ret;
1107
	int ret;
Line 765... Line 1109...
765
	BUG_ON(!ctx_entry->valid);
1109
	BUG_ON(!ctx_entry->valid);
766
	sw_context->last_query_ctx = ctx_entry->res;
1110
	sw_context->last_query_ctx = ctx_entry->res;
Line 767... Line 1111...
767
 
1111
 
Line 768... Line 1112...
768
	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1112
	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
769
 
1113
 
770
		if (unlikely(new_query_bo->num_pages > 4)) {
1114
		if (unlikely(new_query_bo->base.num_pages > 4)) {
771
			DRM_ERROR("Query buffer too large.\n");
1115
			DRM_ERROR("Query buffer too large.\n");
Line 772... Line 1116...
772
			return -EINVAL;
1116
			return -EINVAL;
Line 834... Line 1178...
834
			DRM_ERROR("Out of fifo space for dummy query.\n");
1178
			DRM_ERROR("Out of fifo space for dummy query.\n");
835
	}
1179
	}
Line 836... Line 1180...
836
 
1180
 
837
	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1181
	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
838
		if (dev_priv->pinned_bo) {
1182
		if (dev_priv->pinned_bo) {
839
			vmw_bo_pin(dev_priv->pinned_bo, false);
1183
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
840
			ttm_bo_unref(&dev_priv->pinned_bo);
1184
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
Line 841... Line 1185...
841
		}
1185
		}
842
 
1186
 
Line 843... Line 1187...
843
		if (!sw_context->needs_post_query_barrier) {
1187
		if (!sw_context->needs_post_query_barrier) {
844
			vmw_bo_pin(sw_context->cur_query_bo, true);
1188
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
845
 
1189
 
846
			/*
1190
			/*
847
			 * We pin also the dummy_query_bo buffer so that we
1191
			 * We pin also the dummy_query_bo buffer so that we
Line -... Line 1192...
-
 
1192
			 * don't need to validate it when emitting
848
			 * don't need to validate it when emitting
1193
			 * dummy queries in context destroy paths.
-
 
1194
			 */
849
			 * dummy queries in context destroy paths.
1195
 
-
 
1196
			if (!dev_priv->dummy_query_bo_pinned) {
Line 850... Line 1197...
850
			 */
1197
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
851
 
1198
						    true);
852
			vmw_bo_pin(dev_priv->dummy_query_bo, true);
1199
				dev_priv->dummy_query_bo_pinned = true;
853
			dev_priv->dummy_query_bo_pinned = true;
1200
			}
854
 
1201
 
855
			BUG_ON(sw_context->last_query_ctx == NULL);
1202
			BUG_ON(sw_context->last_query_ctx == NULL);
856
			dev_priv->query_cid = sw_context->last_query_ctx->id;
1203
			dev_priv->query_cid = sw_context->last_query_ctx->id;
857
			dev_priv->query_cid_valid = true;
1204
			dev_priv->query_cid_valid = true;
Line 858... Line 1205...
858
			dev_priv->pinned_bo =
1205
			dev_priv->pinned_bo =
Line 883... Line 1230...
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
 				 struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
Line 892... Line 1238...
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
-		return -EINVAL;
-	}
+		ret = -EINVAL;
+		goto out_no_reloc;
Line 898... Line 1245...
-	bo = &vmw_bo->base;
+	}
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
Line 906... Line 1253...
 
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->mob_loc = id;
Line 909... Line 1256...
 	reloc->location = NULL;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
Line 912... Line 1259...
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
Line 914... Line 1261...
 
 	*vmw_bo_p = vmw_bo;
 	return 0;
 
 out_no_reloc:
Line 919... Line 1266...
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
Line 944... Line 1291...
 				   struct vmw_sw_context *sw_context,
 				   SVGAGuestPtr *ptr,
 				   struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
Line 953... Line 1299...
 
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
-		return -EINVAL;
-	}
+		ret = -EINVAL;
+		goto out_no_reloc;
Line 959... Line 1306...
-	bo = &vmw_bo->base;
+	}
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
Line 966... Line 1313...
 	}
Line 967... Line 1314...
 
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
Line 969... Line 1316...
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
Line 972... Line 1319...
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
Line 974... Line 1321...
 
 	*vmw_bo_p = vmw_bo;
 	return 0;
 
 out_no_reloc:
Line -... Line 1326...
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
 	return ret;
 }
+
+
+
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_define_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineQuery q;
+	} *cmd;
+
+	int    ret;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *cotable_res;
+
+
+	if (ctx_node == NULL) {
+		DRM_ERROR("DX Context not set for query.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
+	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+		return -EINVAL;
+
+	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+	vmw_resource_unreference(&cotable_res);
+
+	return ret;
+}
+
+
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB.  In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_bind_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindQuery q;
+	} *cmd;
+
+	struct vmw_dma_buffer *vmw_bo;
+	int    ret;
+
+
+	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+	/*
+	 * Look up the buffer pointed to by q.mobid, put it on the relocation
+	 * list so its kernel mode MOB ID can be filled in later
+	 */
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+				    &vmw_bo);
+
+	if (ret != 0)
+		return ret;
+
+	sw_context->dx_query_mob = vmw_bo;
+	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+
+	return ret;
+}
 
 
Line 1073... Line 1512...
 				    &cmd->q.mobid,
 				    &vmw_bo);
 	if (unlikely(ret != 0))
 		return ret;
Line 1077... Line 1516...
 
Line 1078... Line 1517...
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
Line 1127... Line 1566...
 				      &cmd->q.guestResult,
 				      &vmw_bo);
 	if (unlikely(ret != 0))
 		return ret;
Line 1131... Line 1570...
 
Line 1132... Line 1571...
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
Line 1361... Line 1800...
 
 	for (; cur_state < last_state; ++cur_state) {
 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
Line -... Line 1803...
 			continue;
+
+		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) cur_state->stage);
+			return -EINVAL;
+		}
 
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
 					&cur_state->value, &res_node);
Line 1369... Line 1814...
 		if (unlikely(ret != 0))
 			return ret;
Line 1371... Line 1816...
 
 		if (dev_priv->has_mob) {
-			struct vmw_ctx_bindinfo bi;
+			struct vmw_ctx_bindinfo_tex binding;
 
-			bi.ctx = ctx_node->res;
-			bi.res = res_node ? res_node->res : NULL;
-			bi.bt = vmw_ctx_binding_tex;
-			bi.i1.texture_stage = cur_state->stage;
+			binding.bi.ctx = ctx_node->res;
+			binding.bi.res = res_node ? res_node->res : NULL;
+			binding.bi.bt = vmw_ctx_binding_tex;
+			binding.texture_stage = cur_state->stage;
Line 1379... Line 1824...
-			vmw_context_binding_add(ctx_node->staged_bindings,
-						&bi);
+			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+					0, binding.texture_stage);
Line 1405... Line 1850...
 	vmw_dmabuf_unreference(&vmw_bo);
Line 1406... Line 1851...
 
 	return ret;
Line -... Line 1853...
 }
+
+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     struct vmw_resource_val_node *val_node,
+				     uint32_t *buf_id,
+				     unsigned long backup_offset)
+{
+	struct vmw_dma_buffer *dma_buf;
+	int ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	if (ret)
+		return ret;
+
+	val_node->switching_backup = true;
+	if (val_node->first_usage)
+		val_node->no_buffer_needed = true;
+
+	vmw_dmabuf_unreference(&val_node->new_backup);
+	val_node->new_backup = dma_buf;
+	val_node->new_backup_offset = backup_offset;
+
+	return 0;
+}
 
 
 /**
  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
  *
Line 1418... Line 1904...
  * @buf_id: Pointer to the user-space backup buffer handle in the command
  * stream.
  * @backup_offset: Offset of backup into MOB.
  *
  * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
  */
 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 enum vmw_res_type res_type,
 				 const struct vmw_user_resource_conv
 				 *converter,
 				 uint32_t *res_id,
 				 uint32_t *buf_id,
 				 unsigned long backup_offset)
 {
-	int ret;
-	struct vmw_dma_buffer *dma_buf;
 	struct vmw_resource_val_node *val_node;
+	int ret;
Line 1437... Line 1923...
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
 				converter, res_id, &val_node);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
-	if (unlikely(ret != 0))
+	if (ret)
Line 1445... Line -...
 		return ret;
-
-	if (val_node->first_usage)
-		val_node->no_buffer_needed = true;
-
-	vmw_dmabuf_unreference(&val_node->new_backup);
-	val_node->new_backup = dma_buf;
-	val_node->new_backup_offset = backup_offset;
-
+
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
Line 1454... Line 1930...
-	return 0;
+					 buf_id, backup_offset);
 }
 
Line 1621... Line 2097...
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.sid, NULL);
 }
Line -... Line 2101...
+
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_define_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineShader body;
+	} *cmd;
+	int ret;
+	size_t size;
+	struct vmw_resource_val_node *val;
+
+	cmd = container_of(header, struct vmw_shader_define_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&val);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	size = cmd->header.size - sizeof(cmd->body);
+	ret = vmw_compat_shader_add(dev_priv,
+				    vmw_context_res_man(val->res),
+				    cmd->body.shid, cmd + 1,
+				    cmd->body.type, size,
+				    &sw_context->staged_cmd_res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_destroy_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyShader body;
+	} *cmd;
+	int ret;
+	struct vmw_resource_val_node *val;
+
+	cmd = container_of(header, struct vmw_shader_destroy_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				&val);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	ret = vmw_shader_remove(vmw_context_res_man(val->res),
+				cmd->body.shid,
+				cmd->body.type,
+				&sw_context->staged_cmd_res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
-#if 0
 
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
  *
Line 1639... Line 2208...
1639
	struct vmw_set_shader_cmd {
2208
	struct vmw_set_shader_cmd {
1640
		SVGA3dCmdHeader header;
2209
		SVGA3dCmdHeader header;
1641
		SVGA3dCmdSetShader body;
2210
		SVGA3dCmdSetShader body;
1642
	} *cmd;
2211
	} *cmd;
1643
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2212
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1644
	struct vmw_ctx_bindinfo bi;
2213
	struct vmw_ctx_bindinfo_shader binding;
1645
	struct vmw_resource *res = NULL;
2214
	struct vmw_resource *res = NULL;
1646
	int ret;
2215
	int ret;
Line 1647... Line 2216...
1647
 
2216
 
1648
	cmd = container_of(header, struct vmw_set_shader_cmd,
2217
	cmd = container_of(header, struct vmw_set_shader_cmd,
Line -... Line 2218...
-
 
2218
			   header);
-
 
2219
 
-
 
2220
	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
-
 
2221
		DRM_ERROR("Illegal shader type %u.\n",
-
 
2222
			  (unsigned) cmd->body.type);
-
 
2223
		return -EINVAL;
1649
			   header);
2224
	}
1650
 
2225
 
1651
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2226
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1652
				user_context_converter, &cmd->body.cid,
2227
				user_context_converter, &cmd->body.cid,
1653
				&ctx_node);
2228
				&ctx_node);
Line 1654... Line 2229...
1654
	if (unlikely(ret != 0))
2229
	if (unlikely(ret != 0))
1655
		return ret;
2230
		return ret;
Line 1656... Line 2231...
1656
 
2231
 
1657
	if (!dev_priv->has_mob)
-
 
1658
		return 0;
2232
	if (!dev_priv->has_mob)
1659
 
2233
		return 0;
1660
	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2234
 
Line 1661... Line 2235...
1661
		res = vmw_compat_shader_lookup
2235
	if (cmd->body.shid != SVGA3D_INVALID_ID) {
1662
			(vmw_context_res_man(ctx_node->res),
2236
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
1663
			 cmd->body.shid,
-
 
1664
			 cmd->body.type);
2237
					cmd->body.shid,
1665
 
2238
					cmd->body.type);
1666
		if (!IS_ERR(res)) {
2239
 
1667
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2240
		if (!IS_ERR(res)) {
1668
						    vmw_res_shader,
2241
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
Line 1681... Line 2254...
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
Line 1722... Line 2296...
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
Line 1748... Line 2321...
	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, make sure it's validated (present in the device) so that
 * the remove command will not confuse the device.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * Add view to the validate list iff it was not created using this
	 * command batch.
	 */
	return vmw_view_res_val_add(sw_context, view);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}

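/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): the DX validators above all track bindings the same way - fill
 * in a struct vmw_ctx_bindinfo_* and hand it to vmw_binding_add(), e.g.
 * for a hypothetical shader-resource binding:
 *
 *	binding.bi.ctx = ctx_node->res;
 *	binding.bi.res = res_node ? res_node->res : NULL;
 *	binding.bi.bt = vmw_ctx_binding_sr;
 *	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
 *			binding.shader_slot, binding.slot);
 *
 * The bindings are staged during validation and applied to the context
 * when the resources are unreserved after submission.
 */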
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
Line 1888... Line 3144...
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
Line 1973... Line 3229...
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);
Line 2092... Line 3348...
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context the list belongs to.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
Line 2130... Line 3394...

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

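/*
 * Illustrative sketch (an editorial assumption, not part of the original
 * file): the placement fallback implemented by vmw_validate_single_buffer()
 * below is essentially a cascade of ttm_bo_validate() calls:
 *
 *	if (validate_as_mob)
 *		return ttm_bo_validate(bo, &vmw_mob_placement, ...);
 *	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, ...);
 *	if (ret != 0 && ret != -ERESTARTSYS)
 *		ret = ttm_bo_validate(bo, &vmw_vram_placement, ...);
 *
 * i.e. MOB-backed buffers validate into MOB memory, while everything else
 * tries VRAM-or-GMR first and only then falls back to evicting VRAM
 * contents.
 */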
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * If that failed, try VRAM again, this time evicting
Line 2178... Line 3439...
2178
	struct vmw_validate_buffer *entry;
3439
	struct vmw_validate_buffer *entry;
2179
	int ret;
3440
	int ret;
Line 2180... Line 3441...
2180
 
3441
 
2181
	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3442
	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
-
 
3443
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2182
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3444
						 true,
2183
						 entry->validate_as_mob);
3445
						 entry->validate_as_mob);
2184
		if (unlikely(ret != 0))
3446
		if (unlikely(ret != 0))
2185
			return ret;
3447
			return ret;
2186
	}
3448
	}
Line 2245... Line 3507...
2245
		synced = true;
3507
		synced = true;
2246
	}
3508
	}
Line 2247... Line 3509...
2247
 
3509
 
2248
	if (p_handle != NULL)
3510
	if (p_handle != NULL)
2249
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
-
 
2250
					    sequence,
-
 
2251
					    DRM_VMW_FENCE_FLAG_EXEC,
3511
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2252
					    p_fence, p_handle);
3512
					    sequence, p_fence, p_handle);
2253
	else
3513
	else
2254
		ret = vmw_fence_create(dev_priv->fman, sequence,
-
 
2255
				       DRM_VMW_FENCE_FLAG_EXEC,
-
 
Line 2256... Line 3514...
2256
				       p_fence);
3514
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
2257
 
3515
 
2258
	if (unlikely(ret != 0 && !synced)) {
3516
	if (unlikely(ret != 0 && !synced)) {
2259
		(void) vmw_fallback_wait(dev_priv, false, false,
3517
		(void) vmw_fallback_wait(dev_priv, false, false,
Line 2303... Line 3561...
2303
	fence_rep.error = ret;
3561
	fence_rep.error = ret;
2304
	if (ret == 0) {
3562
	if (ret == 0) {
2305
		BUG_ON(fence == NULL);
3563
		BUG_ON(fence == NULL);
Line 2306... Line 3564...
2306
 
3564
 
2307
		fence_rep.handle = fence_handle;
3565
		fence_rep.handle = fence_handle;
2308
		fence_rep.seqno = fence->seqno;
3566
		fence_rep.seqno = fence->base.seqno;
2309
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3567
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
2310
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3568
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
Line 2311... Line 3569...
2311
	}
3569
	}
2312
 
3570
 
2313
	/*
3571
	/*
2314
	 * copy_to_user errors will be detected by user space not
3572
	 * copy_to_user errors will be detected by user space not
2315
	 * seeing fence_rep::error filled in. Typically
3573
	 * seeing fence_rep::error filled in. Typically
2316
	 * user-space would have pre-set that member to -EFAULT.
3574
	 * user-space would have pre-set that member to -EFAULT.
2317
	 */
3575
	 */
Line 2318... Line 3576...
2318
//   ret = copy_to_user(user_fence_rep, &fence_rep,
3576
	ret = copy_to_user(user_fence_rep, &fence_rep,
2319
//              sizeof(fence_rep));
3577
			   sizeof(fence_rep));
2320
 
3578
 
2321
	/*
3579
	/*
2322
	 * User-space lost the fence object. We need to sync
3580
	 * User-space lost the fence object. We need to sync
2323
	 * and unreference the handle.
3581
	 * and unreference the handle.
2324
	 */
3582
	 */
2325
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3583
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2326
		ttm_ref_object_base_unref(vmw_fp->tfile,
3584
		ttm_ref_object_base_unref(vmw_fp->tfile,
2327
					  fence_handle, TTM_REF_USAGE);
-
 
2328
		DRM_ERROR("Fence copy error. Syncing.\n");
3585
					  fence_handle, TTM_REF_USAGE);
2329
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
3586
		DRM_ERROR("Fence copy error. Syncing.\n");
2330
					  false, false,
3587
		(void) vmw_fence_obj_wait(fence, false, false,
Line -... Line 3588...
-
 
3588
					  VMW_FENCE_WAIT_TIMEOUT);
-
 
3589
	}
-
 
3590
}
-
 
3591
 
-
 
3592
/**
-
 
3593
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
-
 
3594
 * the fifo.
-
 
3595
 *
-
 
3596
 * @dev_priv: Pointer to a device private structure.
-
 
3597
 * @kernel_commands: Pointer to the unpatched command batch.
-
 
3598
 * @command_size: Size of the unpatched command batch.
-
 
3599
 * @sw_context: Structure holding the relocation lists.
-
 
3600
 *
-
 
3601
 * Side effects: If this function returns 0, then the command batch
-
 
3602
 * pointed to by @kernel_commands will have been modified.
-
 
3603
 */
-
 
3604
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
-
 
3605
				   void *kernel_commands,
-
 
3606
				   u32 command_size,
-
 
3607
				   struct vmw_sw_context *sw_context)
-
 
3608
{
-
 
3609
	void *cmd;
-
 
3610
 
-
 
3611
	if (sw_context->dx_ctx_node)
-
 
3612
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
-
 
3613
					  sw_context->dx_ctx_node->res->id);
-
 
3614
	else
-
 
3615
		cmd = vmw_fifo_reserve(dev_priv, command_size);
-
 
3616
	if (!cmd) {
-
 
3617
		DRM_ERROR("Failed reserving fifo space for commands.\n");
-
 
3618
		return -ENOMEM;
-
 
3619
	}
-
 
3620
 
-
 
3621
	vmw_apply_relocations(sw_context);
Line -... Line 3622...
-
 
3622
	memcpy(cmd, kernel_commands, command_size);
-
 
3623
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
-
 
3624
	vmw_resource_relocations_free(&sw_context->res_relocations);
-
 
3625
	vmw_fifo_commit(dev_priv, command_size);
-
 
3626
 
-
 
3627
	return 0;
-
 
3628
}
-
 
3629
 
-
 
3630
/**
-
 
3631
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
-
 
3632
 * the command buffer manager.
-
 
3633
 *
-
 
3634
 * @dev_priv: Pointer to a device private structure.
-
 
3635
 * @header: Opaque handle to the command buffer allocation.
-
 
3636
 * @command_size: Size of the unpatched command batch.
-
 
3637
 * @sw_context: Structure holding the relocation lists.
-
 
3638
 *
-
 
3639
 * Side effects: If this function returns 0, then the command buffer
-
 
3640
 * represented by @header will have been modified.
-
 
3641
 */
-
 
3642
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
-
 
3643
				     struct vmw_cmdbuf_header *header,
-
 
3644
				     u32 command_size,
-
 
3645
				     struct vmw_sw_context *sw_context)
-
 
3646
{
-
 
3647
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
-
 
3648
		  SVGA3D_INVALID_ID);
-
 
3649
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
-
 
3650
				       id, false, header);
-
 
3651
 
-
 
3652
	vmw_apply_relocations(sw_context);
-
 
3653
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
-
 
3654
	vmw_resource_relocations_free(&sw_context->res_relocations);
-
 
3655
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
-
 
3656
 
-
 
3657
	return 0;
-
 
3658
}
-
 
3659
 
-
 
3660
/**
-
 
3661
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
-
 
3662
 * submission using a command buffer.
-
 
3663
 *
-
 
3664
 * @dev_priv: Pointer to a device private structure.
-
 
3665
 * @user_commands: User-space pointer to the commands to be submitted.
-
 
3666
 * @command_size: Size of the unpatched command batch.
-
 
3667
 * @header: Out parameter returning the opaque pointer to the command buffer.
-
 
3668
 *
-
 
3669
 * This function checks whether we can use the command buffer manager for
-
 
3670
 * submission and if so, creates a command buffer of suitable size and
-
 
3671
 * copies the user data into that buffer.
-
 
3672
 *
-
 
3673
 * On successful return, the function returns a pointer to the data in the
-
 
3674
 * command buffer and *@header is set to non-NULL.
-
 
3675
 * If command buffers could not be used, the function will return the value
-
 
3676
 * of @kernel_commands on function call. That value may be NULL. In that case,
-
 
3677
 * the value of *@header will be set to NULL.
-
 
3678
 * If an error is encountered, the function will return a pointer error value.
-
 
3679
 * If the function is interrupted by a signal while sleeping, it will return
-
 
3680
 * -ERESTARTSYS casted to a pointer error value.
-
 
3681
 */
-
 
3682
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
-
 
3683
				void __user *user_commands,
-
 
3684
				void *kernel_commands,
-
 
3685
				u32 command_size,
-
 
3686
				struct vmw_cmdbuf_header **header)
-
 
3687
{
-
 
3688
	size_t cmdbuf_size;
-
 
3689
	int ret;
-
 
3690
 
-
 
3691
	*header = NULL;
-
 
3692
	if (!dev_priv->cman || kernel_commands)
-
 
3693
		return kernel_commands;
-
 
3694
 
-
 
3695
	if (command_size > SVGA_CB_MAX_SIZE) {
-
 
3696
		DRM_ERROR("Command buffer is too large.\n");
-
 
3697
		return ERR_PTR(-EINVAL);
-
 
3698
	}
-
 
3699
 
-
 
3700
	/* If possible, add a little space for fencing. */
-
 
3701
	cmdbuf_size = command_size + 512;
-
 
3702
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
-
 
3703
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
-
 
3704
					   true, header);
-
 
3705
	if (IS_ERR(kernel_commands))
-
 
3706
		return kernel_commands;
-
 
3707
 
-
 
3708
	ret = copy_from_user(kernel_commands, user_commands,
-
 
3709
			     command_size);
-
 
3710
	if (ret) {
-
 
3711
		DRM_ERROR("Failed copying commands.\n");
-
 
3712
		vmw_cmdbuf_header_free(*header);
-
 
3713
		*header = NULL;
-
 
3714
		return ERR_PTR(-EFAULT);
-
 
3715
	}
-
 
3716
 
-
 
3717
	return kernel_commands;
-
 
3718
}
-
 
3719
 
-
 
3720
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
-
 
3721
				   struct vmw_sw_context *sw_context,
-
 
3722
				   uint32_t handle)
-
 
3723
{
-
 
3724
	struct vmw_resource_val_node *ctx_node;
-
 
3725
	struct vmw_resource *res;
-
 
3726
	int ret;
-
 
3727
 
-
 
3728
	if (handle == SVGA3D_INVALID_ID)
-
 
3729
		return 0;
-
 
3730
 
-
 
3731
	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
-
 
3732
					      handle, user_context_converter,
-
 
3733
					      &res);
-
 
3734
	if (unlikely(ret != 0)) {
-
 
3735
		DRM_ERROR("Could not find or user DX context 0x%08x.\n",
-
 
3736
			  (unsigned) handle);
-
 
3737
		return ret;
-
 
3738
	}
-
 
3739
 
-
 
3740
	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
-
 
3741
	if (unlikely(ret != 0))
-
 
3742
		goto out_err;
-
 
3743
 
-
 
3744
	sw_context->dx_ctx_node = ctx_node;
Line 2331... Line 3745...
2331
					  VMW_FENCE_WAIT_TIMEOUT);
3745
	sw_context->man = vmw_context_res_man(res);
2332
	}
3746
out_err:
2333
}
3747
	vmw_resource_unreference(&res);
2334
 
3748
	return ret;
2335
 
3749
}
2336
 
3750
 
-
 
3751
int vmw_execbuf_process(struct drm_file *file_priv,
2337
int vmw_execbuf_process(struct drm_file *file_priv,
3752
			struct vmw_private *dev_priv,
2338
			struct vmw_private *dev_priv,
3753
			void __user *user_commands,
2339
			void __user *user_commands,
3754
			void *kernel_commands,
2340
			void *kernel_commands,
3755
			uint32_t command_size,
2341
			uint32_t command_size,
3756
			uint64_t throttle_us,
2342
			uint64_t throttle_us,
3757
			uint32_t dx_context_handle,
2343
			struct drm_vmw_fence_rep __user *user_fence_rep,
3758
			struct drm_vmw_fence_rep __user *user_fence_rep,
-
 
3759
			struct vmw_fence_obj **out_fence)
2344
			struct vmw_fence_obj **out_fence)
3760
{
2345
{
3761
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
2346
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
-
 
2347
	struct vmw_fence_obj *fence = NULL;
3762
	struct vmw_fence_obj *fence = NULL;
Line -... Line 3763...
-
 
3763
	struct vmw_resource *error_resource;
-
 
3764
	struct list_head resource_list;
-
 
3765
	struct vmw_cmdbuf_header *header;
-
 
3766
	struct ww_acquire_ctx ticket;
-
 
3767
	uint32_t handle;
-
 
3768
	int ret;
-
 
3769
 
-
 
3770
	if (throttle_us) {
-
 
3771
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
-
 
3772
				   throttle_us);
-
 
3773
 
-
 
3774
		if (ret)
-
 
3775
			return ret;
-
 
3776
	}
2348
	struct vmw_resource *error_resource;
3777
 
2349
	struct list_head resource_list;
3778
	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
2350
	struct ww_acquire_ctx ticket;
3779
					     kernel_commands, command_size,
-
 
3780
					     &header);
-
 
3781
	if (IS_ERR(kernel_commands))
Line 2351... Line -...
2351
	uint32_t handle;
-
 
2352
	void *cmd;
-
 
2353
	int ret;
3782
		return PTR_ERR(kernel_commands);
2354
 
-
 
-
 
3783
 
2355
	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3784
	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2356
	if (unlikely(ret != 0))
3785
	if (ret) {
2357
		return -ERESTARTSYS;
3786
		ret = -ERESTARTSYS;
Line 2372... Line 3801...
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;
Line 2436... Line 3878...
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
Line 2464... Line 3903...
2464
	 */
3903
	 */
Line 2465... Line 3904...
2465
 
3904
 
2466
	if (ret != 0)
3905
	if (ret != 0)
Line 2467... Line 3906...
2467
		DRM_ERROR("Fence submission error. Syncing.\n");
3906
		DRM_ERROR("Fence submission error. Syncing.\n");
2468
 
-
 
Line 2469... Line 3907...
2469
	vmw_resource_list_unreserve(&sw_context->resource_list, false);
3907
 
2470
	mutex_unlock(&dev_priv->binding_mutex);
3908
	vmw_resources_unreserve(sw_context, false);
Line 2471... Line 3909...
2471
 
3909
 
Line 2494... Line 3932...
2494
 
3932
 
2495
	/*
3933
	/*
2496
	 * Unreference resources outside of the cmdbuf_mutex to
3934
	 * Unreference resources outside of the cmdbuf_mutex to
2497
	 * avoid deadlocks in resource destruction paths.
3935
	 * avoid deadlocks in resource destruction paths.
2498
	 */
3936
	 */
Line 2499... Line 3937...
2499
	vmw_resource_list_unreference(&resource_list);
3937
	vmw_resource_list_unreference(sw_context, &resource_list);
Line 2500... Line 3938...
2500
 
3938
 
2501
	return 0;
3939
	return 0;
2502
 
3940
 
2503
out_unlock_binding:
3941
out_unlock_binding:
2504
	mutex_unlock(&dev_priv->binding_mutex);
3942
	mutex_unlock(&dev_priv->binding_mutex);
2505
out_err:
3943
out_err:
2506
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
3944
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2507
out_err_nores:
3945
out_err_nores:
2508
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
3946
	vmw_resources_unreserve(sw_context, true);
2509
	vmw_resource_relocations_free(&sw_context->res_relocations);
3947
	vmw_resource_relocations_free(&sw_context->res_relocations);
2510
	vmw_free_relocations(sw_context);
3948
	vmw_free_relocations(sw_context);
Line 2521... Line 3959...
2521
 
3959
 
2522
	/*
3960
	/*
2523
	 * Unreference resources outside of the cmdbuf_mutex to
3961
	 * Unreference resources outside of the cmdbuf_mutex to
2524
	 * avoid deadlocks in resource destruction paths.
3962
	 * avoid deadlocks in resource destruction paths.
2525
	 */
3963
	 */
2526
	vmw_resource_list_unreference(&resource_list);
3964
	vmw_resource_list_unreference(sw_context, &resource_list);
2527
	if (unlikely(error_resource != NULL))
3965
	if (unlikely(error_resource != NULL))
-
 
3966
		vmw_resource_unreference(&error_resource);
-
 
3967
out_free_header:
-
 
3968
	if (header)
Line 2528... Line 3969...
2528
		vmw_resource_unreference(&error_resource);
3969
		vmw_cmdbuf_header_free(header);
2529
 
3970
 
Line 2530... Line 3971...
2530
	return ret;
3971
	return ret;
Line 2542... Line 3983...
2542
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3983
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2543
{
3984
{
2544
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
3985
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
Line 2545... Line 3986...
2545
 
3986
 
2546
	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
3987
	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
-
 
3988
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2547
	vmw_bo_pin(dev_priv->pinned_bo, false);
3989
	if (dev_priv->dummy_query_bo_pinned) {
2548
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
3990
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2549
	dev_priv->dummy_query_bo_pinned = false;
3991
		dev_priv->dummy_query_bo_pinned = false;
-
 
3992
	}
Line 2550... Line 3993...
2550
}
3993
}
2551
 
3994
 
2552
 
3995
 
Line 2586... Line 4029...
2586
	if (dev_priv->pinned_bo == NULL)
4029
	if (dev_priv->pinned_bo == NULL)
2587
		goto out_unlock;
4030
		goto out_unlock;
Line 2588... Line 4031...
2588
 
4031
 
Line 2589... Line 4032...
2589
	INIT_LIST_HEAD(&validate_list);
4032
	INIT_LIST_HEAD(&validate_list);
-
 
4033
 
2590
 
4034
	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
Line 2591... Line 4035...
2591
	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
4035
	pinned_val.shared = false;
-
 
4036
	list_add_tail(&pinned_val.head, &validate_list);
2592
	list_add_tail(&pinned_val.head, &validate_list);
4037
 
Line 2593... Line -...
2593
 
-
 
2594
	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
4038
	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
2595
	list_add_tail(&query_val.head, &validate_list);
4039
	query_val.shared = false;
2596
 
-
 
2597
	do {
4040
	list_add_tail(&query_val.head, &validate_list);
2598
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
4041
 
2599
	} while (ret == -ERESTARTSYS);
4042
	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
2600
 
4043
				     false, NULL);
Line 2611... Line 4054...
2611
			goto out_no_emit;
4054
			goto out_no_emit;
2612
		}
4055
		}
2613
		dev_priv->query_cid_valid = false;
4056
		dev_priv->query_cid_valid = false;
2614
	}
4057
	}
Line 2615... Line 4058...
2615
 
4058
 
-
 
4059
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
2616
	vmw_bo_pin(dev_priv->pinned_bo, false);
4060
	if (dev_priv->dummy_query_bo_pinned) {
2617
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
4061
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
2618
	dev_priv->dummy_query_bo_pinned = false;
4062
		dev_priv->dummy_query_bo_pinned = false;
2619
 
4063
	}
2620
	if (fence == NULL) {
4064
	if (fence == NULL) {
2621
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4065
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2622
						  NULL);
4066
						  NULL);
2623
		fence = lfence;
4067
		fence = lfence;
Line 2626... Line 4070...
2626
	if (lfence != NULL)
4070
	if (lfence != NULL)
2627
		vmw_fence_obj_unreference(&lfence);
4071
		vmw_fence_obj_unreference(&lfence);
Line 2628... Line 4072...
2628
 
4072
 
2629
	ttm_bo_unref(&query_val.bo);
4073
	ttm_bo_unref(&query_val.bo);
2630
	ttm_bo_unref(&pinned_val.bo);
4074
	ttm_bo_unref(&pinned_val.bo);
-
 
4075
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
-
 
4076
	DRM_INFO("Dummy query bo pin count: %d\n",
Line 2631... Line 4077...
2631
	ttm_bo_unref(&dev_priv->pinned_bo);
4077
		 dev_priv->dummy_query_bo->pin_count);
2632
 
4078
 
Line 2633... Line 4079...
2633
out_unlock:
4079
out_unlock:
2634
	return;
4080
	return;
2635
 
4081
 
2636
out_no_emit:
4082
out_no_emit:
2637
	ttm_eu_backoff_reservation(&ticket, &validate_list);
4083
	ttm_eu_backoff_reservation(&ticket, &validate_list);
2638
out_no_reserve:
4084
out_no_reserve:
2639
	ttm_bo_unref(&query_val.bo);
4085
	ttm_bo_unref(&query_val.bo);
Line 2640... Line 4086...
2640
	ttm_bo_unref(&pinned_val.bo);
4086
	ttm_bo_unref(&pinned_val.bo);
2641
	ttm_bo_unref(&dev_priv->pinned_bo);
4087
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
2642
}
4088
}
Line 2665... Line 4111...
2665
	if (dev_priv->query_cid_valid)
4111
	if (dev_priv->query_cid_valid)
2666
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4112
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2667
	mutex_unlock(&dev_priv->cmdbuf_mutex);
4113
	mutex_unlock(&dev_priv->cmdbuf_mutex);
2668
}
4114
}
Line 2669... Line -...
2669
 
-
 
2670
 
4115
 
2671
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4116
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
2672
		      struct drm_file *file_priv)
4117
		      struct drm_file *file_priv, size_t size)
2673
{
4118
{
2674
	struct vmw_private *dev_priv = vmw_priv(dev);
4119
	struct vmw_private *dev_priv = vmw_priv(dev);
2675
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
4120
	struct drm_vmw_execbuf_arg arg;
-
 
4121
	int ret;
-
 
4122
	static const size_t copy_offset[] = {
-
 
4123
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
-
 
4124
		sizeof(struct drm_vmw_execbuf_arg)};
-
 
4125
 
-
 
4126
	if (unlikely(size < copy_offset[0])) {
-
 
4127
		DRM_ERROR("Invalid command size, ioctl %d\n",
-
 
4128
			  DRM_VMW_EXECBUF);
-
 
4129
		return -EINVAL;
-
 
4130
	}
-
 
4131
 
-
 
4132
	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
Line 2676... Line 4133...
2676
	int ret;
4133
		return -EFAULT;
2677
 
4134
 
2678
	/*
4135
	/*
2679
	 * This will allow us to extend the ioctl argument while
4136
	 * Extend the ioctl argument while
2680
	 * maintaining backwards compatibility:
4137
	 * maintaining backwards compatibility:
2681
	 * We take different code paths depending on the value of
4138
	 * We take different code paths depending on the value of
Line 2682... Line 4139...
2682
	 * arg->version.
4139
	 * arg.version.
-
 
4140
	 */
2683
	 */
4141
 
2684
 
-
 
2685
	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
-
 
2686
		DRM_ERROR("Incorrect execbuf version.\n");
4142
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
2687
		DRM_ERROR("You're running outdated experimental "
4143
		     arg.version == 0)) {
Line -... Line 4144...
-
 
4144
		DRM_ERROR("Incorrect execbuf version.\n");
-
 
4145
		return -EINVAL;
-
 
4146
	}
-
 
4147
 
-
 
4148
	if (arg.version > 1 &&
-
 
4149
	    copy_from_user(&arg.context_handle,
-
 
4150
			   (void __user *) (data + copy_offset[0]),
-
 
4151
			   copy_offset[arg.version - 1] -
-
 
4152
			   copy_offset[0]) != 0)
-
 
4153
		return -EFAULT;
-
 
4154
 
-
 
4155
	switch (arg.version) {
-
 
4156
	case 1:
-
 
4157
		arg.context_handle = (uint32_t) -1;
-
 
4158
		break;
-
 
4159
	case 2:
-
 
4160
		if (arg.pad64 != 0) {
-
 
4161
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
-
 
4162
			return -EINVAL;
-
 
4163
		}
-
 
4164
		break;
2688
			  "vmwgfx user-space drivers.");
4165
	default:
2689
		return -EINVAL;
4166
		break;
2690
	}
4167
	}
Line 2691... Line 4168...
2691
 
4168
 
2692
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4169
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2693
	if (unlikely(ret != 0))
4170
	if (unlikely(ret != 0))
-
 
4171
		return ret;
2694
		return ret;
4172
 
2695
 
4173
	ret = vmw_execbuf_process(file_priv, dev_priv,
2696
	ret = vmw_execbuf_process(file_priv, dev_priv,
-
 
-
 
4174
				  (void __user *)(unsigned long)arg.commands,
2697
				  (void __user *)(unsigned long)arg->commands,
4175
				  NULL, arg.command_size, arg.throttle_us,
2698
				  NULL, arg->command_size, arg->throttle_us,
4176
				  arg.context_handle,
Line 2699... Line 4177...
2699
				  (void __user *)(unsigned long)arg->fence_rep,
4177
				  (void __user *)(unsigned long)arg.fence_rep,
Line 2700... Line -...
2700
				  NULL);
-
 
2701
 
-
 
2702
	if (unlikely(ret != 0))
4178
				  NULL);
2703
		goto out_unlock;
4179
	ttm_read_unlock(&dev_priv->reservation_sem);
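The rewritten ioctl entry point above copies the argument in two stages: the version-1 prefix first (everything before context_handle), then, for version-2 callers, the extension bytes; copy_offset[] maps a version to how many bytes of the struct are meaningful. Below is a userspace approximation of the scheme for illustration only: the struct is simplified, memcpy stands in for copy_from_user, and all names are local to the sketch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct drm_vmw_execbuf_arg: fields up to
 * context_handle form the version-1 layout; the rest is the v2 tail. */
struct execbuf_arg {
	uint64_t commands;
	uint32_t command_size;
	uint32_t version;
	uint32_t context_handle;	/* first field added in version 2 */
	uint32_t pad64;
};

static const size_t copy_offset[] = {
	offsetof(struct execbuf_arg, context_handle),	/* bytes valid for v1 */
	sizeof(struct execbuf_arg),			/* bytes valid for v2 */
};

/* memcpy stands in for copy_from_user(); "user" is the caller's blob. */
static int parse_arg(const void *user, size_t size, struct execbuf_arg *arg)
{
	if (size < copy_offset[0])
		return -1;			/* too small even for v1 */

	memcpy(arg, user, copy_offset[0]);	/* always safe: v1 prefix */

	if (arg->version == 0 || arg->version > 2)
		return -1;			/* unknown version */

	if (size < copy_offset[arg->version - 1])
		return -1;			/* blob smaller than claimed */

	if (arg->version > 1)			/* fetch only the v2 extension */
		memcpy((char *)arg + copy_offset[0],
		       (const char *)user + copy_offset[0],
		       copy_offset[arg->version - 1] - copy_offset[0]);
	else
		arg->context_handle = (uint32_t)-1;	/* v1: no context */

	return 0;
}

int main(void)
{
	struct execbuf_arg v1 = { .command_size = 64, .version = 1 };
	struct execbuf_arg out;

	if (parse_arg(&v1, copy_offset[0], &out) != 0)
		return 1;
	printf("v%u arg, context_handle=%#x\n", out.version, out.context_handle);
	return 0;
}

The pad64 check in the real ioctl (rejecting nonzero unused bytes) is what keeps those bytes available for future extension; the sketch omits it for brevity.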