Subversion Repositories Kolibri OS

Rev

Rev 5078 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
4075 Serge 1
/**************************************************************************
2
 *
6296 serge 3
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4075 Serge 4
 * All Rights Reserved.
5
 *
6
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * copy of this software and associated documentation files (the
8
 * "Software"), to deal in the Software without restriction, including
9
 * without limitation the rights to use, copy, modify, merge, publish,
10
 * distribute, sub license, and/or sell copies of the Software, and to
11
 * permit persons to whom the Software is furnished to do so, subject to
12
 * the following conditions:
13
 *
14
 * The above copyright notice and this permission notice (including the
15
 * next paragraph) shall be included in all copies or substantial portions
16
 * of the Software.
17
 *
18
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25
 *
26
 **************************************************************************/
27
 
28
#include "vmwgfx_drv.h"
29
#include "vmwgfx_resource_priv.h"
6296 serge 30
#include "vmwgfx_so.h"
31
#include "vmwgfx_binding.h"
4075 Serge 32
#include <ttm/ttm_placement.h>
6296 serge 33
#include "device_include/svga3d_surfacedefs.h"
4075 Serge 34
 
6296 serge 35
 
4075 Serge 36
/**
37
 * struct vmw_user_surface - User-space visible surface resource
38
 *
39
 * @base:           The TTM base object handling user-space visibility.
40
 * @srf:            The surface metadata.
41
 * @size:           TTM accounting size for the surface.
6296 serge 42
 * @master: master of the creating client. Used for security check.
4075 Serge 43
 */
44
struct vmw_user_surface {
4569 Serge 45
	struct ttm_prime_object prime;
4075 Serge 46
	struct vmw_surface srf;
47
	uint32_t size;
6296 serge 48
	struct drm_master *master;
49
	struct ttm_base_object *backup_base;
4075 Serge 50
};
51
 
52
/**
53
 * struct vmw_surface_offset - Backing store mip level offset info
54
 *
55
 * @face:           Surface face.
56
 * @mip:            Mip level.
57
 * @bo_offset:      Offset into backing store of this mip level.
58
 *
59
 */
60
struct vmw_surface_offset {
61
	uint32_t face;
62
	uint32_t mip;
63
	uint32_t bo_offset;
64
};
65
 
66
static void vmw_user_surface_free(struct vmw_resource *res);
67
static struct vmw_resource *
68
vmw_user_surface_base_to_res(struct ttm_base_object *base);
69
static int vmw_legacy_srf_bind(struct vmw_resource *res,
70
			       struct ttm_validate_buffer *val_buf);
71
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
72
				 bool readback,
73
				 struct ttm_validate_buffer *val_buf);
74
static int vmw_legacy_srf_create(struct vmw_resource *res);
75
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
4569 Serge 76
static int vmw_gb_surface_create(struct vmw_resource *res);
77
static int vmw_gb_surface_bind(struct vmw_resource *res,
78
			       struct ttm_validate_buffer *val_buf);
79
static int vmw_gb_surface_unbind(struct vmw_resource *res,
80
				 bool readback,
81
				 struct ttm_validate_buffer *val_buf);
82
static int vmw_gb_surface_destroy(struct vmw_resource *res);
4075 Serge 83
 
4569 Serge 84
 
4075 Serge 85
static const struct vmw_user_resource_conv user_surface_conv = {
86
	.object_type = VMW_RES_SURFACE,
87
	.base_obj_to_res = vmw_user_surface_base_to_res,
88
	.res_free = vmw_user_surface_free
89
};
90
 
91
const struct vmw_user_resource_conv *user_surface_converter =
92
	&user_surface_conv;
93
 
94
 
95
static uint64_t vmw_user_surface_size;
96
 
97
static const struct vmw_res_func vmw_legacy_surface_func = {
98
	.res_type = vmw_res_surface,
99
	.needs_backup = false,
100
	.may_evict = true,
101
	.type_name = "legacy surfaces",
102
	.backup_placement = &vmw_srf_placement,
103
	.create = &vmw_legacy_srf_create,
104
	.destroy = &vmw_legacy_srf_destroy,
105
	.bind = &vmw_legacy_srf_bind,
106
	.unbind = &vmw_legacy_srf_unbind
107
};
108
 
4569 Serge 109
static const struct vmw_res_func vmw_gb_surface_func = {
110
	.res_type = vmw_res_surface,
111
	.needs_backup = true,
112
	.may_evict = true,
113
	.type_name = "guest backed surfaces",
114
	.backup_placement = &vmw_mob_placement,
115
	.create = vmw_gb_surface_create,
116
	.destroy = vmw_gb_surface_destroy,
117
	.bind = vmw_gb_surface_bind,
118
	.unbind = vmw_gb_surface_unbind
119
};
120
 
4075 Serge 121
/**
122
 * struct vmw_surface_dma - SVGA3D DMA command
123
 */
124
struct vmw_surface_dma {
125
	SVGA3dCmdHeader header;
126
	SVGA3dCmdSurfaceDMA body;
127
	SVGA3dCopyBox cb;
128
	SVGA3dCmdSurfaceDMASuffix suffix;
129
};
130
 
131
/**
132
 * struct vmw_surface_define - SVGA3D Surface Define command
133
 */
134
struct vmw_surface_define {
135
	SVGA3dCmdHeader header;
136
	SVGA3dCmdDefineSurface body;
137
};
138
 
139
/**
140
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
141
 */
142
struct vmw_surface_destroy {
143
	SVGA3dCmdHeader header;
144
	SVGA3dCmdDestroySurface body;
145
};
146
 
147
 
148
/**
149
 * vmw_surface_dma_size - Compute fifo size for a dma command.
150
 *
151
 * @srf: Pointer to a struct vmw_surface
152
 *
153
 * Computes the required size for a surface dma command for backup or
154
 * restoration of the surface represented by @srf.
155
 */
156
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
157
{
158
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
159
}
160
 
161
 
162
/**
163
 * vmw_surface_define_size - Compute fifo size for a surface define command.
164
 *
165
 * @srf: Pointer to a struct vmw_surface
166
 *
167
 * Computes the required size for a surface define command for the definition
168
 * of the surface represented by @srf.
169
 */
170
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
171
{
172
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
173
		sizeof(SVGA3dSize);
174
}
175
 
176
 
177
/**
178
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
179
 *
180
 * Computes the required size for a surface destroy command for the destruction
181
 * of a hw surface.
182
 */
183
static inline uint32_t vmw_surface_destroy_size(void)
184
{
185
	return sizeof(struct vmw_surface_destroy);
186
}
187
 
188
/**
189
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
190
 *
191
 * @id: The surface id
192
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
193
 */
194
static void vmw_surface_destroy_encode(uint32_t id,
195
				       void *cmd_space)
196
{
197
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
198
		cmd_space;
199
 
200
	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
201
	cmd->header.size = sizeof(cmd->body);
202
	cmd->body.sid = id;
203
}
204
 
205
/**
206
 * vmw_surface_define_encode - Encode a surface_define command.
207
 *
208
 * @srf: Pointer to a struct vmw_surface object.
209
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
210
 */
211
static void vmw_surface_define_encode(const struct vmw_surface *srf,
212
				      void *cmd_space)
213
{
214
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
215
		cmd_space;
216
	struct drm_vmw_size *src_size;
217
	SVGA3dSize *cmd_size;
218
	uint32_t cmd_len;
219
	int i;
220
 
221
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
222
 
223
	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
224
	cmd->header.size = cmd_len;
225
	cmd->body.sid = srf->res.id;
226
	cmd->body.surfaceFlags = srf->flags;
6296 serge 227
	cmd->body.format = srf->format;
4075 Serge 228
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
229
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
230
 
231
	cmd += 1;
232
	cmd_size = (SVGA3dSize *) cmd;
233
	src_size = srf->sizes;
234
 
235
	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
236
		cmd_size->width = src_size->width;
237
		cmd_size->height = src_size->height;
238
		cmd_size->depth = src_size->depth;
239
	}
240
}
241
 
242
/**
243
 * vmw_surface_dma_encode - Encode a surface_dma command.
244
 *
245
 * @srf: Pointer to a struct vmw_surface object.
246
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
247
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
248
 * should be placed or read from.
249
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
250
 */
251
static void vmw_surface_dma_encode(struct vmw_surface *srf,
252
				   void *cmd_space,
253
				   const SVGAGuestPtr *ptr,
254
				   bool to_surface)
255
{
256
	uint32_t i;
257
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
258
	const struct svga3d_surface_desc *desc =
259
		svga3dsurface_get_desc(srf->format);
260
 
261
	for (i = 0; i < srf->num_sizes; ++i) {
262
		SVGA3dCmdHeader *header = &cmd->header;
263
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
264
		SVGA3dCopyBox *cb = &cmd->cb;
265
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
266
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
267
		const struct drm_vmw_size *cur_size = &srf->sizes[i];
268
 
269
		header->id = SVGA_3D_CMD_SURFACE_DMA;
270
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
271
 
272
		body->guest.ptr = *ptr;
273
		body->guest.ptr.offset += cur_offset->bo_offset;
274
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
275
								  cur_size);
276
		body->host.sid = srf->res.id;
277
		body->host.face = cur_offset->face;
278
		body->host.mipmap = cur_offset->mip;
279
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
280
				  SVGA3D_READ_HOST_VRAM);
281
		cb->x = 0;
282
		cb->y = 0;
283
		cb->z = 0;
284
		cb->srcx = 0;
285
		cb->srcy = 0;
286
		cb->srcz = 0;
287
		cb->w = cur_size->width;
288
		cb->h = cur_size->height;
289
		cb->d = cur_size->depth;
290
 
291
		suffix->suffixSize = sizeof(*suffix);
292
		suffix->maximumOffset =
293
			svga3dsurface_get_image_buffer_size(desc, cur_size,
294
							    body->guest.pitch);
295
		suffix->flags.discard = 0;
296
		suffix->flags.unsynchronized = 0;
297
		suffix->flags.reserved = 0;
298
		++cmd;
299
	}
300
};
301
 
302
 
303
/**
304
 * vmw_hw_surface_destroy - destroy a Device surface
305
 *
306
 * @res:        Pointer to a struct vmw_resource embedded in a struct
307
 *              vmw_surface.
308
 *
309
 * Destroys a the device surface associated with a struct vmw_surface if
310
 * any, and adjusts accounting and resource count accordingly.
311
 */
312
static void vmw_hw_surface_destroy(struct vmw_resource *res)
313
{
314
 
315
	struct vmw_private *dev_priv = res->dev_priv;
316
	struct vmw_surface *srf;
317
	void *cmd;
318
 
4569 Serge 319
	if (res->func->destroy == vmw_gb_surface_destroy) {
320
		(void) vmw_gb_surface_destroy(res);
321
		return;
322
	}
323
 
4075 Serge 324
	if (res->id != -1) {
325
 
326
		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
327
		if (unlikely(cmd == NULL)) {
328
			DRM_ERROR("Failed reserving FIFO space for surface "
329
				  "destruction.\n");
330
			return;
331
		}
332
 
333
		vmw_surface_destroy_encode(res->id, cmd);
334
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
335
 
336
		/*
337
		 * used_memory_size_atomic, or separate lock
338
		 * to avoid taking dev_priv::cmdbuf_mutex in
339
		 * the destroy path.
340
		 */
341
 
342
		mutex_lock(&dev_priv->cmdbuf_mutex);
343
		srf = vmw_res_to_srf(res);
344
		dev_priv->used_memory_size -= res->backup_size;
345
		mutex_unlock(&dev_priv->cmdbuf_mutex);
346
	}
6296 serge 347
	vmw_fifo_resource_dec(dev_priv);
4075 Serge 348
}
349
 
350
/**
351
 * vmw_legacy_srf_create - Create a device surface as part of the
352
 * resource validation process.
353
 *
354
 * @res: Pointer to a struct vmw_surface.
355
 *
356
 * If the surface doesn't have a hw id.
357
 *
358
 * Returns -EBUSY if there wasn't sufficient device resources to
359
 * complete the validation. Retry after freeing up resources.
360
 *
361
 * May return other errors if the kernel is out of guest resources.
362
 */
363
static int vmw_legacy_srf_create(struct vmw_resource *res)
364
{
365
	struct vmw_private *dev_priv = res->dev_priv;
366
	struct vmw_surface *srf;
367
	uint32_t submit_size;
368
	uint8_t *cmd;
369
	int ret;
370
 
371
	if (likely(res->id != -1))
372
		return 0;
373
 
374
	srf = vmw_res_to_srf(res);
375
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
376
		     dev_priv->memory_size))
377
		return -EBUSY;
378
 
379
	/*
380
	 * Alloc id for the resource.
381
	 */
382
 
383
	ret = vmw_resource_alloc_id(res);
384
	if (unlikely(ret != 0)) {
385
		DRM_ERROR("Failed to allocate a surface id.\n");
386
		goto out_no_id;
387
	}
388
 
389
	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
390
		ret = -EBUSY;
391
		goto out_no_fifo;
392
	}
393
 
394
	/*
395
	 * Encode surface define- commands.
396
	 */
397
 
398
	submit_size = vmw_surface_define_size(srf);
399
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
400
	if (unlikely(cmd == NULL)) {
401
		DRM_ERROR("Failed reserving FIFO space for surface "
402
			  "creation.\n");
403
		ret = -ENOMEM;
404
		goto out_no_fifo;
405
	}
406
 
407
	vmw_surface_define_encode(srf, cmd);
408
	vmw_fifo_commit(dev_priv, submit_size);
409
	/*
410
	 * Surface memory usage accounting.
411
	 */
412
 
413
	dev_priv->used_memory_size += res->backup_size;
414
	return 0;
415
 
416
out_no_fifo:
417
	vmw_resource_release_id(res);
418
out_no_id:
419
	return ret;
420
}
421
 
422
/**
423
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
424
 *
425
 * @res:            Pointer to a struct vmw_res embedded in a struct
426
 *                  vmw_surface.
427
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
428
 *                  information about the backup buffer.
429
 * @bind:           Boolean wether to DMA to the surface.
430
 *
431
 * Transfer backup data to or from a legacy surface as part of the
432
 * validation process.
433
 * May return other errors if the kernel is out of guest resources.
434
 * The backup buffer will be fenced or idle upon successful completion,
435
 * and if the surface needs persistent backup storage, the backup buffer
436
 * will also be returned reserved iff @bind is true.
437
 */
438
static int vmw_legacy_srf_dma(struct vmw_resource *res,
439
			      struct ttm_validate_buffer *val_buf,
440
			      bool bind)
441
{
442
	SVGAGuestPtr ptr;
443
	struct vmw_fence_obj *fence;
444
	uint32_t submit_size;
445
	struct vmw_surface *srf = vmw_res_to_srf(res);
446
	uint8_t *cmd;
447
	struct vmw_private *dev_priv = res->dev_priv;
448
 
449
	BUG_ON(val_buf->bo == NULL);
450
 
451
	submit_size = vmw_surface_dma_size(srf);
452
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
453
	if (unlikely(cmd == NULL)) {
454
		DRM_ERROR("Failed reserving FIFO space for surface "
455
			  "DMA.\n");
456
		return -ENOMEM;
457
	}
458
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
459
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
460
 
461
	vmw_fifo_commit(dev_priv, submit_size);
462
 
463
	/*
464
	 * Create a fence object and fence the backup buffer.
465
	 */
466
 
467
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
468
					  &fence, NULL);
469
 
470
	vmw_fence_single_bo(val_buf->bo, fence);
471
 
472
	if (likely(fence != NULL))
473
		vmw_fence_obj_unreference(&fence);
474
 
475
	return 0;
476
}
477
 
478
/**
479
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
480
 *                       surface validation process.
481
 *
482
 * @res:            Pointer to a struct vmw_res embedded in a struct
483
 *                  vmw_surface.
484
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
485
 *                  information about the backup buffer.
486
 *
487
 * This function will copy backup data to the surface if the
488
 * backup buffer is dirty.
489
 */
490
static int vmw_legacy_srf_bind(struct vmw_resource *res,
491
			       struct ttm_validate_buffer *val_buf)
492
{
493
	if (!res->backup_dirty)
494
		return 0;
495
 
496
	return vmw_legacy_srf_dma(res, val_buf, true);
497
}
498
 
499
 
500
/**
501
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
502
 *                         surface eviction process.
503
 *
504
 * @res:            Pointer to a struct vmw_res embedded in a struct
505
 *                  vmw_surface.
506
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
507
 *                  information about the backup buffer.
508
 *
509
 * This function will copy backup data from the surface.
510
 */
511
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
512
				 bool readback,
513
				 struct ttm_validate_buffer *val_buf)
514
{
515
	if (unlikely(readback))
516
		return vmw_legacy_srf_dma(res, val_buf, false);
517
	return 0;
518
}
519
 
520
/**
521
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
522
 *                          resource eviction process.
523
 *
524
 * @res:            Pointer to a struct vmw_res embedded in a struct
525
 *                  vmw_surface.
526
 */
527
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
528
{
529
	struct vmw_private *dev_priv = res->dev_priv;
530
	uint32_t submit_size;
531
	uint8_t *cmd;
532
 
533
	BUG_ON(res->id == -1);
534
 
535
	/*
536
	 * Encode the dma- and surface destroy commands.
537
	 */
538
 
539
	submit_size = vmw_surface_destroy_size();
540
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
541
	if (unlikely(cmd == NULL)) {
542
		DRM_ERROR("Failed reserving FIFO space for surface "
543
			  "eviction.\n");
544
		return -ENOMEM;
545
	}
546
 
547
	vmw_surface_destroy_encode(res->id, cmd);
548
	vmw_fifo_commit(dev_priv, submit_size);
549
 
550
	/*
551
	 * Surface memory usage accounting.
552
	 */
553
 
554
	dev_priv->used_memory_size -= res->backup_size;
555
 
556
	/*
557
	 * Release the surface ID.
558
	 */
559
 
560
	vmw_resource_release_id(res);
561
 
562
	return 0;
563
}
564
 
565
 
566
/**
567
 * vmw_surface_init - initialize a struct vmw_surface
568
 *
569
 * @dev_priv:       Pointer to a device private struct.
570
 * @srf:            Pointer to the struct vmw_surface to initialize.
571
 * @res_free:       Pointer to a resource destructor used to free
572
 *                  the object.
573
 */
574
static int vmw_surface_init(struct vmw_private *dev_priv,
575
			    struct vmw_surface *srf,
576
			    void (*res_free) (struct vmw_resource *res))
577
{
578
	int ret;
579
	struct vmw_resource *res = &srf->res;
580
 
581
	BUG_ON(res_free == NULL);
4569 Serge 582
	if (!dev_priv->has_mob)
6296 serge 583
		vmw_fifo_resource_inc(dev_priv);
4075 Serge 584
	ret = vmw_resource_init(dev_priv, res, true, res_free,
4569 Serge 585
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
4075 Serge 586
				&vmw_legacy_surface_func);
587
 
588
	if (unlikely(ret != 0)) {
4569 Serge 589
		if (!dev_priv->has_mob)
6296 serge 590
			vmw_fifo_resource_dec(dev_priv);
4075 Serge 591
		res_free(res);
592
		return ret;
593
	}
594
 
595
	/*
596
	 * The surface won't be visible to hardware until a
597
	 * surface validate.
598
	 */
599
 
6296 serge 600
	INIT_LIST_HEAD(&srf->view_list);
4075 Serge 601
	vmw_resource_activate(res, vmw_hw_surface_destroy);
602
	return ret;
603
}
604
 
605
/**
606
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
607
 *                                user visible surfaces
608
 *
609
 * @base:           Pointer to a TTM base object
610
 *
611
 * Returns the struct vmw_resource embedded in a struct vmw_surface
612
 * for the user-visible object identified by the TTM base object @base.
613
 */
614
static struct vmw_resource *
615
vmw_user_surface_base_to_res(struct ttm_base_object *base)
616
{
4569 Serge 617
	return &(container_of(base, struct vmw_user_surface,
618
			      prime.base)->srf.res);
4075 Serge 619
}
620
 
621
/**
622
 * vmw_user_surface_free - User visible surface resource destructor
623
 *
624
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
625
 */
626
static void vmw_user_surface_free(struct vmw_resource *res)
627
{
628
	struct vmw_surface *srf = vmw_res_to_srf(res);
629
	struct vmw_user_surface *user_srf =
630
	    container_of(srf, struct vmw_user_surface, srf);
631
	struct vmw_private *dev_priv = srf->res.dev_priv;
632
	uint32_t size = user_srf->size;
633
 
6296 serge 634
	if (user_srf->master)
635
		drm_master_put(&user_srf->master);
4075 Serge 636
	kfree(srf->offsets);
637
	kfree(srf->sizes);
638
	kfree(srf->snooper.image);
6296 serge 639
	ttm_prime_object_kfree(user_srf, prime);
4075 Serge 640
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
641
}
642
 
643
/**
644
 * vmw_user_surface_free - User visible surface TTM base object destructor
645
 *
646
 * @p_base:         Pointer to a pointer to a TTM base object
647
 *                  embedded in a struct vmw_user_surface.
648
 *
649
 * Drops the base object's reference on its resource, and the
650
 * pointer pointed to by *p_base is set to NULL.
651
 */
652
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
653
{
654
	struct ttm_base_object *base = *p_base;
655
	struct vmw_user_surface *user_srf =
4569 Serge 656
	    container_of(base, struct vmw_user_surface, prime.base);
4075 Serge 657
	struct vmw_resource *res = &user_srf->srf.res;
658
 
659
	*p_base = NULL;
6296 serge 660
	if (user_srf->backup_base)
661
		ttm_base_object_unref(&user_srf->backup_base);
4075 Serge 662
	vmw_resource_unreference(&res);
663
}
664
 
665
#if 0
6296 serge 666
 * vmw_user_surface_destroy_ioctl - Ioctl function implementing
667
 *                                  the user surface destroy functionality.
668
 *
669
 * @dev:            Pointer to a struct drm_device.
670
 * @data:           Pointer to data copied from / to user-space.
671
 * @file_priv:      Pointer to a drm file private structure.
672
 */
673
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
674
			      struct drm_file *file_priv)
675
{
676
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
677
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
678
 
679
	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
680
}
681
 
4075 Serge 682
/**
683
 * vmw_user_surface_define_ioctl - Ioctl function implementing
684
 *                                  the user surface define functionality.
685
 *
686
 * @dev:            Pointer to a struct drm_device.
687
 * @data:           Pointer to data copied from / to user-space.
688
 * @file_priv:      Pointer to a drm file private structure.
689
 */
690
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
691
			     struct drm_file *file_priv)
692
{
693
	struct vmw_private *dev_priv = vmw_priv(dev);
694
	struct vmw_user_surface *user_srf;
695
	struct vmw_surface *srf;
696
	struct vmw_resource *res;
697
	struct vmw_resource *tmp;
698
	union drm_vmw_surface_create_arg *arg =
699
	    (union drm_vmw_surface_create_arg *)data;
700
	struct drm_vmw_surface_create_req *req = &arg->req;
701
	struct drm_vmw_surface_arg *rep = &arg->rep;
702
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
703
	struct drm_vmw_size __user *user_sizes;
704
	int ret;
705
	int i, j;
706
	uint32_t cur_bo_offset;
707
	struct drm_vmw_size *cur_size;
708
	struct vmw_surface_offset *cur_offset;
709
	uint32_t num_sizes;
710
	uint32_t size;
711
	const struct svga3d_surface_desc *desc;
712
 
713
	if (unlikely(vmw_user_surface_size == 0))
714
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
715
			128;
716
 
717
	num_sizes = 0;
718
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
719
		num_sizes += req->mip_levels[i];
720
 
721
	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
722
	    DRM_VMW_MAX_MIP_LEVELS)
723
		return -EINVAL;
724
 
725
	size = vmw_user_surface_size + 128 +
726
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
727
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
728
 
729
 
730
	desc = svga3dsurface_get_desc(req->format);
731
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
732
		DRM_ERROR("Invalid surface format for surface creation.\n");
6296 serge 733
		DRM_ERROR("Format requested is: %d\n", req->format);
4075 Serge 734
		return -EINVAL;
735
	}
736
 
5078 serge 737
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4075 Serge 738
	if (unlikely(ret != 0))
739
		return ret;
740
 
741
	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
742
				   size, false, true);
743
	if (unlikely(ret != 0)) {
744
		if (ret != -ERESTARTSYS)
745
			DRM_ERROR("Out of graphics memory for surface"
746
				  " creation.\n");
747
		goto out_unlock;
748
	}
749
 
750
	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
751
	if (unlikely(user_srf == NULL)) {
752
		ret = -ENOMEM;
753
		goto out_no_user_srf;
754
	}
755
 
756
	srf = &user_srf->srf;
757
	res = &srf->res;
758
 
759
	srf->flags = req->flags;
760
	srf->format = req->format;
761
	srf->scanout = req->scanout;
762
 
763
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
764
	srf->num_sizes = num_sizes;
765
	user_srf->size = size;
766
 
767
	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
768
	if (unlikely(srf->sizes == NULL)) {
769
		ret = -ENOMEM;
770
		goto out_no_sizes;
771
	}
772
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
773
			       GFP_KERNEL);
774
	if (unlikely(srf->sizes == NULL)) {
775
		ret = -ENOMEM;
776
		goto out_no_offsets;
777
	}
778
 
779
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
780
	    req->size_addr;
781
 
782
	ret = copy_from_user(srf->sizes, user_sizes,
783
			     srf->num_sizes * sizeof(*srf->sizes));
784
	if (unlikely(ret != 0)) {
785
		ret = -EFAULT;
786
		goto out_no_copy;
787
	}
788
 
789
	srf->base_size = *srf->sizes;
790
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
4569 Serge 791
	srf->multisample_count = 0;
4075 Serge 792
 
793
	cur_bo_offset = 0;
794
	cur_offset = srf->offsets;
795
	cur_size = srf->sizes;
796
 
797
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
798
		for (j = 0; j < srf->mip_levels[i]; ++j) {
799
			uint32_t stride = svga3dsurface_calculate_pitch
800
				(desc, cur_size);
801
 
802
			cur_offset->face = i;
803
			cur_offset->mip = j;
804
			cur_offset->bo_offset = cur_bo_offset;
805
			cur_bo_offset += svga3dsurface_get_image_buffer_size
806
				(desc, cur_size, stride);
807
			++cur_offset;
808
			++cur_size;
809
		}
810
	}
811
	res->backup_size = cur_bo_offset;
812
	if (srf->scanout &&
813
	    srf->num_sizes == 1 &&
814
	    srf->sizes[0].width == 64 &&
815
	    srf->sizes[0].height == 64 &&
816
	    srf->format == SVGA3D_A8R8G8B8) {
817
 
818
		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
819
		/* clear the image */
820
		if (srf->snooper.image) {
821
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
822
		} else {
823
			DRM_ERROR("Failed to allocate cursor_image\n");
824
			ret = -ENOMEM;
825
			goto out_no_copy;
826
		}
827
	} else {
828
		srf->snooper.image = NULL;
829
	}
830
	srf->snooper.crtc = NULL;
831
 
4569 Serge 832
	user_srf->prime.base.shareable = false;
833
	user_srf->prime.base.tfile = NULL;
4075 Serge 834
 
835
	/**
836
	 * From this point, the generic resource management functions
837
	 * destroy the object on failure.
838
	 */
839
 
840
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
841
	if (unlikely(ret != 0))
842
		goto out_unlock;
843
 
6296 serge 844
	/*
845
	 * A gb-aware client referencing a shared surface will
846
	 * expect a backup buffer to be present.
847
	 */
848
	if (dev_priv->has_mob && req->shareable) {
849
		uint32_t backup_handle;
850
 
851
		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
852
					    res->backup_size,
853
					    true,
854
					    &backup_handle,
855
					    &res->backup,
856
					    &user_srf->backup_base);
857
		if (unlikely(ret != 0)) {
858
			vmw_resource_unreference(&res);
859
			goto out_unlock;
860
		}
861
	}
862
 
4075 Serge 863
	tmp = vmw_resource_reference(&srf->res);
4569 Serge 864
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
6296 serge 865
				    req->shareable, VMW_RES_SURFACE,
866
				    &vmw_user_surface_base_release, NULL);
4075 Serge 867
 
868
	if (unlikely(ret != 0)) {
869
		vmw_resource_unreference(&tmp);
870
		vmw_resource_unreference(&res);
871
		goto out_unlock;
872
	}
873
 
4569 Serge 874
	rep->sid = user_srf->prime.base.hash.key;
4075 Serge 875
	vmw_resource_unreference(&res);
876
 
5078 serge 877
	ttm_read_unlock(&dev_priv->reservation_sem);
4075 Serge 878
	return 0;
879
out_no_copy:
880
	kfree(srf->offsets);
881
out_no_offsets:
882
	kfree(srf->sizes);
883
out_no_sizes:
4569 Serge 884
	ttm_prime_object_kfree(user_srf, prime);
4075 Serge 885
out_no_user_srf:
886
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
887
out_unlock:
5078 serge 888
	ttm_read_unlock(&dev_priv->reservation_sem);
6296 serge 889
	return ret;
890
}
5078 serge 891
 
6296 serge 892
 
893
static int
894
vmw_surface_handle_reference(struct vmw_private *dev_priv,
895
			     struct drm_file *file_priv,
896
			     uint32_t u_handle,
897
			     enum drm_vmw_handle_type handle_type,
898
			     struct ttm_base_object **base_p)
899
{
900
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
901
	struct vmw_user_surface *user_srf;
902
	uint32_t handle;
903
	struct ttm_base_object *base;
904
	int ret;
905
 
906
	if (handle_type == DRM_VMW_HANDLE_PRIME) {
907
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
908
		if (unlikely(ret != 0))
909
			return ret;
910
	} else {
911
		if (unlikely(drm_is_render_client(file_priv))) {
912
			DRM_ERROR("Render client refused legacy "
913
				  "surface reference.\n");
914
			return -EACCES;
915
		}
916
		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
917
			DRM_ERROR("Locked master refused legacy "
918
				  "surface reference.\n");
919
			return -EACCES;
920
		}
921
 
922
		handle = u_handle;
923
	}
924
 
925
	ret = -EINVAL;
926
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
927
	if (unlikely(base == NULL)) {
928
		DRM_ERROR("Could not find surface to reference.\n");
929
		goto out_no_lookup;
930
	}
931
 
932
	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
933
		DRM_ERROR("Referenced object is not a surface.\n");
934
		goto out_bad_resource;
935
	}
936
 
937
	if (handle_type != DRM_VMW_HANDLE_PRIME) {
938
		user_srf = container_of(base, struct vmw_user_surface,
939
					prime.base);
940
 
941
		/*
942
		 * Make sure the surface creator has the same
943
		 * authenticating master.
944
		 */
945
		if (drm_is_primary_client(file_priv) &&
946
		    user_srf->master != file_priv->master) {
947
			DRM_ERROR("Trying to reference surface outside of"
948
				  " master domain.\n");
949
			ret = -EACCES;
950
			goto out_bad_resource;
951
		}
952
 
953
		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
954
		if (unlikely(ret != 0)) {
955
			DRM_ERROR("Could not add a reference to a surface.\n");
956
			goto out_bad_resource;
957
		}
958
	}
959
 
960
	*base_p = base;
961
	return 0;
962
 
963
out_bad_resource:
964
	ttm_base_object_unref(&base);
965
out_no_lookup:
966
	if (handle_type == DRM_VMW_HANDLE_PRIME)
967
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
968
 
4075 Serge 969
	return ret;
970
}
971
 
972
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	/*
	 * Look up the surface and, on success, add a TTM_REF_USAGE
	 * reference on behalf of the caller's file.
	 */
	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Report the surface metadata back to user-space. */
	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	/* Only the base size is copied out; user_sizes may be NULL. */
	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		/* Undo the reference taken above before failing. */
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}
1023
 
1024
#endif
6296 serge 1025
/**
 * vmw_gb_surface_create - Create a guest-backed surface on the device.
 *
 * @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface.
 *
 * Allocates a device surface id and emits a DEFINE_GB_SURFACE command —
 * or the v2 variant when the surface has an array size — to the FIFO.
 * Returns 0 on success or a negative error code on failure.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;

	/* Already defined on the device; nothing to do. */
	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/* Array surfaces require the v2 define command. */
	if (srf->array_size > 0) {
		/* has_dx checked on creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = vmw_fifo_reserve(dev_priv, submit_len);
	/* cmd and cmd2 alias the same reservation; only one is used below. */
	cmd2 = (typeof(cmd2))cmd;
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (srf->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = srf->flags;
		cmd2->body.format = cpu_to_le32(srf->format);
		cmd2->body.numMipLevels = srf->mip_levels[0];
		cmd2->body.multisampleCount = srf->multisample_count;
		cmd2->body.autogenFilter = srf->autogen_filter;
		cmd2->body.size.width = srf->base_size.width;
		cmd2->body.size.height = srf->base_size.height;
		cmd2->body.size.depth = srf->base_size.depth;
		cmd2->body.arraySize = srf->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = srf->flags;
		cmd->body.format = cpu_to_le32(srf->format);
		cmd->body.numMipLevels = srf->mip_levels[0];
		cmd->body.multisampleCount = srf->multisample_count;
		cmd->body.autogenFilter = srf->autogen_filter;
		cmd->body.size.width = srf->base_size.width;
		cmd->body.size.height = srf->base_size.height;
		cmd->body.size.depth = srf->base_size.depth;
	}

	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}
1118
 
1119
 
1120
static int vmw_gb_surface_bind(struct vmw_resource *res,
1121
			       struct ttm_validate_buffer *val_buf)
1122
{
1123
	struct vmw_private *dev_priv = res->dev_priv;
1124
	struct {
1125
		SVGA3dCmdHeader header;
1126
		SVGA3dCmdBindGBSurface body;
1127
	} *cmd1;
1128
	struct {
1129
		SVGA3dCmdHeader header;
1130
		SVGA3dCmdUpdateGBSurface body;
1131
	} *cmd2;
1132
	uint32_t submit_size;
1133
	struct ttm_buffer_object *bo = val_buf->bo;
1134
 
1135
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
1136
 
1137
	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
1138
 
1139
	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
1140
	if (unlikely(cmd1 == NULL)) {
1141
		DRM_ERROR("Failed reserving FIFO space for surface "
1142
			  "binding.\n");
1143
		return -ENOMEM;
1144
	}
1145
 
1146
	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
1147
	cmd1->header.size = sizeof(cmd1->body);
1148
	cmd1->body.sid = res->id;
1149
	cmd1->body.mobid = bo->mem.start;
1150
	if (res->backup_dirty) {
1151
		cmd2 = (void *) &cmd1[1];
1152
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
1153
		cmd2->header.size = sizeof(cmd2->body);
1154
		cmd2->body.sid = res->id;
1155
		res->backup_dirty = false;
1156
	}
1157
	vmw_fifo_commit(dev_priv, submit_size);
1158
 
1159
	return 0;
1160
}
1161
 
1162
/**
 * vmw_gb_surface_unbind - Detach a guest-backed surface from its backup MOB.
 *
 * @res:      Pointer to the surface resource.
 * @readback: Whether to read surface contents back into the backup buffer
 *            before detaching.
 * @val_buf:  Validation buffer holding the MOB-placed backup buffer object.
 *
 * Emits either a READBACK_GB_SURFACE or INVALIDATE_GB_SURFACE command,
 * always followed by a BIND_GB_SURFACE with an invalid mob id, then
 * fences the backup buffer so it is not reused before the device is done.
 */
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	/* Bind command plus either a readback or an invalidate. */
	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "unbinding.\n");
		return -ENOMEM;
	}

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	/* Detach the MOB by binding to the invalid id. */
	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	/* fence may be NULL if fence creation failed; bo was still fenced. */
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
1231
 
1232
/**
 * vmw_gb_surface_destroy - Destroy a guest-backed surface on the device.
 *
 * @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface.
 *
 * Scrubs views and bindings referencing the surface, emits a
 * DESTROY_GB_SURFACE command and releases the device surface id.
 */
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	/* Never created on the device; nothing to destroy. */
	if (likely(res->id == -1))
		return 0;

	/*
	 * Hold the binding mutex across the scrub and the destroy command
	 * so no new bindings to this surface can be set up in between.
	 */
	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
1266
/**
1267
 * vmw_surface_gb_priv_define - Define a private GB surface
1268
 *
1269
 * @dev:  Pointer to a struct drm_device
1270
 * @user_accounting_size:  Used to track user-space memory usage, set
1271
 *                         to 0 for kernel mode only memory
1272
 * @svga3d_flags: SVGA3d surface flags for the device
1273
 * @format: requested surface format
1274
 * @for_scanout: true if inteded to be used for scanout buffer
1275
 * @num_mip_levels:  number of MIP levels
1276
 * @multisample_count:
1277
 * @array_size: Surface array size.
1278
 * @size: width, heigh, depth of the surface requested
1279
 * @user_srf_out: allocated user_srf.  Set to NULL on failure.
1280
 *
1281
 * GB surfaces allocated by this function will not have a user mode handle, and
1282
 * thus will only be visible to vmwgfx.  For optimization reasons the
1283
 * surface may later be given a user mode handle by another function to make
1284
 * it available to user mode drivers.
1285
 */
1286
int vmw_surface_gb_priv_define(struct drm_device *dev,
1287
			       uint32_t user_accounting_size,
1288
			       uint32_t svga3d_flags,
1289
			       SVGA3dSurfaceFormat format,
1290
			       bool for_scanout,
1291
			       uint32_t num_mip_levels,
1292
			       uint32_t multisample_count,
1293
			       uint32_t array_size,
1294
			       struct drm_vmw_size size,
1295
			       struct vmw_surface **srf_out)
1296
{
1297
	struct vmw_private *dev_priv = vmw_priv(dev);
1298
	struct vmw_user_surface *user_srf;
1299
	struct vmw_surface *srf;
1300
	int ret;
1301
	u32 num_layers;
1302
 
1303
	*srf_out = NULL;
1304
 
1305
	if (for_scanout) {
1306
		if (!svga3dsurface_is_screen_target_format(format)) {
1307
			DRM_ERROR("Invalid Screen Target surface format.");
1308
			return -EINVAL;
1309
		}
1310
	} else {
1311
		const struct svga3d_surface_desc *desc;
1312
 
1313
		desc = svga3dsurface_get_desc(format);
1314
		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
1315
			DRM_ERROR("Invalid surface format.\n");
1316
			return -EINVAL;
1317
		}
1318
	}
1319
 
1320
	/* array_size must be null for non-GL3 host. */
1321
	if (array_size > 0 && !dev_priv->has_dx) {
1322
		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
1323
		return -EINVAL;
1324
	}
1325
 
1326
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1327
	if (unlikely(ret != 0))
1328
		return ret;
1329
 
1330
	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1331
				   user_accounting_size, false, true);
1332
	if (unlikely(ret != 0)) {
1333
		if (ret != -ERESTARTSYS)
1334
			DRM_ERROR("Out of graphics memory for surface"
1335
				  " creation.\n");
1336
		goto out_unlock;
1337
	}
1338
 
1339
	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
1340
	if (unlikely(user_srf == NULL)) {
1341
		ret = -ENOMEM;
1342
		goto out_no_user_srf;
1343
	}
1344
 
1345
	*srf_out  = &user_srf->srf;
1346
	user_srf->size = user_accounting_size;
1347
	user_srf->prime.base.shareable = false;
1348
	user_srf->prime.base.tfile     = NULL;
1349
 
1350
	srf = &user_srf->srf;
1351
	srf->flags             = svga3d_flags;
1352
	srf->format            = format;
1353
	srf->scanout           = for_scanout;
1354
	srf->mip_levels[0]     = num_mip_levels;
1355
	srf->num_sizes         = 1;
1356
	srf->sizes             = NULL;
1357
	srf->offsets           = NULL;
1358
	srf->base_size         = size;
1359
	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
1360
	srf->array_size        = array_size;
1361
	srf->multisample_count = multisample_count;
1362
 
1363
	if (array_size)
1364
		num_layers = array_size;
1365
	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
1366
		num_layers = SVGA3D_MAX_SURFACE_FACES;
1367
	else
1368
		num_layers = 1;
1369
 
1370
	srf->res.backup_size   =
1371
		svga3dsurface_get_serialized_size(srf->format,
1372
						  srf->base_size,
1373
						  srf->mip_levels[0],
1374
						  num_layers);
1375
 
1376
	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
1377
		srf->res.backup_size += sizeof(SVGA3dDXSOState);
1378
 
1379
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
1380
	    for_scanout)
1381
		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
1382
 
1383
	/*
1384
	 * From this point, the generic resource management functions
1385
	 * destroy the object on failure.
1386
	 */
1387
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1388
 
1389
	ttm_read_unlock(&dev_priv->reservation_sem);
1390
	return ret;
1391
 
1392
out_no_user_srf:
1393
	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
1394
 
1395
out_unlock:
1396
	ttm_read_unlock(&dev_priv->reservation_sem);
1397
	return ret;
1398
}